Diffstat (limited to 'fuel')
-rw-r--r--  fuel/LICENCE | 1
-rw-r--r--  fuel/LICENSE.rst | 85
-rw-r--r--  fuel/TODO | 7
-rw-r--r--  fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp | 11
-rw-r--r--  fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp | 87
-rw-r--r--  fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp | 20
-rw-r--r--  fuel/deploy/README.rst (renamed from fuel/prototypes/libvirt/README.rst) | 6
-rw-r--r--  fuel/deploy/cloud_deploy/__init__.py | 1
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/__init__.py | 1
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/common.py | 51
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_environment.py | 74
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_network.py | 62
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_nodes.py | 108
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/configure_settings.py | 47
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/dea.py (renamed from fuel/deploy/dea.py) | 56
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/deploy.py | 208
-rw-r--r--  fuel/deploy/cloud_deploy/cloud/deployment.py | 100
-rw-r--r--  fuel/deploy/cloud_deploy/cloud_deploy.py | 117
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/__init__.py | 1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/dha.py (renamed from fuel/deploy/dha.py) | 5
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py | 1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py | 288
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py | 1
-rw-r--r--  fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py | 153
-rw-r--r--  fuel/deploy/cloud_deploy/ssh_client.py | 56
-rw-r--r--  fuel/deploy/common.py | 29
-rw-r--r--  fuel/deploy/configure_environment.py | 70
-rw-r--r--  fuel/deploy/configure_network.py | 91
-rw-r--r--  fuel/deploy/configure_settings.py | 88
-rw-r--r--  fuel/deploy/dea.yaml | 958
-rw-r--r--  fuel/deploy/deploy.py | 212
-rwxr-xr-x  fuel/deploy/deploy.sh (renamed from fuel/prototypes/libvirt/deploy/deploy.sh) | 14
-rwxr-xr-x  fuel/deploy/deploy_fuel.sh | 106
-rwxr-xr-x  fuel/deploy/functions/common.sh (renamed from fuel/prototypes/libvirt/deploy/functions/common.sh) | 0
-rwxr-xr-x  fuel/deploy/functions/install_iso.sh (renamed from fuel/prototypes/libvirt/deploy/functions/install_iso.sh) | 0
-rw-r--r--  fuel/deploy/functions/isolinux.cfg.patch (renamed from fuel/prototypes/libvirt/deploy/functions/isolinux.cfg.patch) | 0
-rw-r--r--  fuel/deploy/functions/ks.cfg.patch (renamed from fuel/prototypes/libvirt/deploy/functions/ks.cfg.patch) | 0
-rwxr-xr-x  fuel/deploy/functions/patch-iso.sh (renamed from fuel/prototypes/libvirt/deploy/functions/patch-iso.sh) | 0
-rw-r--r--  fuel/deploy/hardware_adapters/hp/hp_adapter.py | 411
-rw-r--r--  fuel/deploy/hardware_adapters/hp/run_oa_command.py | 113
-rw-r--r--  fuel/deploy/libvirt/networks/fuel1 (renamed from fuel/prototypes/libvirt/examples/networks/fuel1) | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel2 (renamed from fuel/prototypes/libvirt/examples/networks/fuel2) | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel3 (renamed from fuel/prototypes/libvirt/examples/networks/fuel3) | 0
-rw-r--r--  fuel/deploy/libvirt/networks/fuel4 (renamed from fuel/prototypes/libvirt/examples/networks/fuel4) | 0
-rw-r--r--  fuel/deploy/libvirt/vms/fuel-master (renamed from fuel/prototypes/libvirt/examples/vms/fuel-master) | 0
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b1 (renamed from fuel/prototypes/libvirt/examples/vms/controller2) | 2
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b2 (renamed from fuel/prototypes/libvirt/examples/vms/controller3) | 2
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b3 | 100
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b4 | 101
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b5 | 100
-rw-r--r--  fuel/deploy/libvirt/vms/s1_b6 | 100
-rwxr-xr-x  fuel/deploy/setup_vms/apply_setup.sh (renamed from fuel/prototypes/libvirt/setup_vms/apply_setup.sh) | 4
-rwxr-xr-x[-rw-r--r--]  fuel/deploy/setup_vms/setup-vm-host.sh (renamed from fuel/docs/src/tmp/BUILD/README.examples) | 7
-rw-r--r--  fuel/docs/src/build-instructions.rst | 177
-rw-r--r--  fuel/docs/src/installation-instructions.rst | 19
-rw-r--r--  fuel/docs/src/release-notes.rst | 9
-rw-r--r--  fuel/docs/src/tmp/BUILD/README.build | 98
-rw-r--r--  fuel/prototypes/deploy/README.rst | 21
-rw-r--r--  fuel/prototypes/deploy/TODO.txt | 34
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/create_templates.sh | 184
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/generate_fuel_node_info.py | 36
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/generate_node_info.py | 61
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/reap_fuel_settings.py (renamed from fuel/prototypes/libvirt/create_dea/reap_interfaces.py) | 24
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/reap_network_scheme.py (renamed from fuel/prototypes/libvirt/create_dea/reap_network_scheme.py) | 0
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/reap_network_settings.py (renamed from fuel/prototypes/libvirt/create_dea/reap_network_settings.py) | 11
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/reap_opnfv_astute.py | 49
-rwxr-xr-x  fuel/prototypes/deploy/create_templates/reap_settings.py (renamed from fuel/prototypes/libvirt/create_dea/reap_settings.py) | 0
-rwxr-xr-x  fuel/prototypes/deploy/deploy/deploy.sh | 176
-rwxr-xr-x  fuel/prototypes/deploy/deploy/dha-adapters/dhaParse.py | 87
-rwxr-xr-x  fuel/prototypes/deploy/deploy/dha-adapters/libvirt.sh | 334
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/common.sh | 67
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/dea-api.sh | 171
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/deaParse.py | 85
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/deploy_env.sh | 136
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/install_iso.sh | 91
-rw-r--r--  fuel/prototypes/deploy/deploy/functions/isolinux.cfg.patch | 14
-rw-r--r--  fuel/prototypes/deploy/deploy/functions/ks.cfg.patch | 19
-rwxr-xr-x  fuel/prototypes/deploy/deploy/functions/patch-iso.sh | 87
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant0.sh | 40
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant1.sh (renamed from fuel/prototypes/libvirt/deploy/tools/transplant1.sh) | 8
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant2.sh (renamed from fuel/prototypes/libvirt/deploy/tools/transplant2.sh) | 38
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_fuel_settings.py | 50
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_interfaces.py (renamed from fuel/prototypes/libvirt/deploy/tools/transplant_interfaces.py) | 16
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_network_scheme.py (renamed from fuel/prototypes/libvirt/deploy/tools/transplant_network_scheme.py) | 0
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_network_settings.py (renamed from fuel/prototypes/libvirt/deploy/tools/transplant_network_settings.py) | 0
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_opnfv_settings.py | 42
-rwxr-xr-x  fuel/prototypes/deploy/deploy/tools/transplant_settings.py (renamed from fuel/prototypes/libvirt/deploy/tools/transplant_settings.py) | 0
-rwxr-xr-x  fuel/prototypes/deploy/deploy/verify_dea.sh | 79
-rwxr-xr-x  fuel/prototypes/deploy/deploy/verify_dha.sh | 126
-rw-r--r--  fuel/prototypes/deploy/documentation/1-introduction.txt | 36
-rw-r--r--  fuel/prototypes/deploy/documentation/2-dea.txt | 1082
-rw-r--r--  fuel/prototypes/deploy/documentation/3-dha.txt | 65
-rw-r--r--  fuel/prototypes/deploy/documentation/4-dha-adapter-api.txt | 128
-rw-r--r--  fuel/prototypes/deploy/documentation/5-dea-api.txt | 47
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/README.txt | 25
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/dea.yaml (renamed from fuel/prototypes/libvirt/examples/libvirt_dea.yaml) | 94
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/dha.yaml | 49
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel1 | 12
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel2 | 5
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel3 | 5
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel4 | 12
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/vms/compute4 (renamed from fuel/prototypes/libvirt/examples/vms/compute4) | 5
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/vms/compute5 (renamed from fuel/prototypes/libvirt/examples/vms/compute5) | 5
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/vms/controller1 (renamed from fuel/prototypes/libvirt/examples/vms/controller1) | 5
-rw-r--r--  fuel/prototypes/deploy/examples/libvirt/conf/vms/fuel-master | 103
-rwxr-xr-x  fuel/prototypes/deploy/examples/libvirt/install-ubuntu-packages.sh (renamed from fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh) | 0
-rwxr-xr-x  fuel/prototypes/deploy/examples/libvirt/setup_example_vms.sh | 105
-rwxr-xr-x  fuel/prototypes/deploy/examples/libvirt/tools/cleanup_example_vms.sh | 58
-rwxr-xr-x  fuel/prototypes/deploy/examples/libvirt/tools/dump_setup.sh (renamed from fuel/prototypes/libvirt/setup_vms/dump_setup.sh) | 43
-rwxr-xr-x[-rw-r--r--]  fuel/prototypes/deploy/list_fixmes.sh (renamed from fuel/docs/src/tmp/BUILD/README.architecture) | 9
-rwxr-xr-x  fuel/prototypes/libvirt/create_dea/create_dea.sh | 86
-rwxr-xr-x  fuel/prototypes/libvirt/deploy/functions/deploy_env.sh | 81
112 files changed, 6928 insertions, 1576 deletions
diff --git a/fuel/LICENCE b/fuel/LICENCE
deleted file mode 100644
index 8b13789..0000000
--- a/fuel/LICENCE
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/fuel/LICENSE.rst b/fuel/LICENSE.rst
new file mode 100644
index 0000000..9537658
--- /dev/null
+++ b/fuel/LICENSE.rst
@@ -0,0 +1,85 @@
+Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+
+Open Platform for NFV Project Software Licence
+==============================================
+Any software developed by the "Open Platform for NFV" Project is licenced under the
+Apache License, Version 2.0 (the "License");
+you may not use the content of this software bundle except in compliance with the License.
+You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Open Platform for NFV Project Documentation Licence
+===================================================
+Any documentation developed by the "Open Platform for NFV Project"
+is licensed under a Creative Commons Attribution 4.0 International License.
+You should have received a copy of the license along with this. If not,
+see <http://creativecommons.org/licenses/by/4.0/>.
+
+Unless required by applicable law or agreed to in writing, documentation
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Other applicable upstream project Licenses relevant for Fuel@OPNFV
+==================================================================
+You may not use the content of this software bundle except in compliance with the
+Licenses as listed below:
+
++----------------+-----------------------------------------------------+
+| **Component** | **Licence** |
++----------------+-----------------------------------------------------+
+| OpenStack | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+| OpenDaylight | Eclipse Public License 1.0 |
+| | https://www.eclipse.org/legal/epl-v10.html |
++----------------+-----------------------------------------------------+
+| PostgreSQL | PostgreSQL Licence: |
+| | http://opensource.org/licenses/postgresql |
++----------------+-----------------------------------------------------+
+| MongoDB | GNU AGPL v3.0. |
+| | http://www.fsf.org/licensing/licenses/agpl-3.0.html |
++----------------+-----------------------------------------------------+
+| CoroSync | BSD 2-Clause |
+| | http://opensource.org/licenses/bsd-license.php |
++----------------+-----------------------------------------------------+
+| Pacemaker | GPL v2 |
+| | https://www.gnu.org/licenses/gpl-2.0.html |
++----------------+-----------------------------------------------------+
+| RabbitMQ | Mozilla Public License |
+| | https://www.rabbitmq.com/mpl.html |
++----------------+-----------------------------------------------------+
+| Linux | GPLv3 |
+| | https://www.gnu.org/copyleft/gpl.html |
++----------------+-----------------------------------------------------+
+| Docker | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+| Fuel | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+| OpenJDK/JRE | GPL v2 |
+| | https://www.gnu.org/licenses/gpl-2.0.html |
++----------------+-----------------------------------------------------+
+| Ceph | GPL v2 |
+| | https://www.gnu.org/licenses/gpl-2.0.html |
++----------------+-----------------------------------------------------+
+| Puppet | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+| Cobbler | GPL v2 |
+| | https://www.gnu.org/licenses/gpl-2.0.html |
++----------------+-----------------------------------------------------+
+| Nailgun | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+| Astute | Apache License 2.0 |
+| | https://www.apache.org/licenses/LICENSE-2.0 |
++----------------+-----------------------------------------------------+
+
diff --git a/fuel/TODO b/fuel/TODO
index 906dfb5..7aa42d2 100644
--- a/fuel/TODO
+++ b/fuel/TODO
@@ -4,6 +4,7 @@
#########################################################################
Following items needs to be done to achieve an OPNFV/BGS ARNO Fuel Stack:
1) Add support for CentOS 6.5 - REMAINING
-2) Add Autodeployment "deploy.sh" for Jenkins
-3) Dry-run Funktest (Jenkins/Robot/etc.)
-4) Finalize Documentation
\ No newline at end of file
+2) Add Local GIT repo mirror
+3) Add Auto-deployment for Linux-Foundation Lab.
+4) Dry-run Funktest (Jenkins/Robot/etc.)
+5) Finalize Documentation
\ No newline at end of file
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
index 8180e3d..c5dce1b 100644
--- a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
+++ b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
@@ -31,6 +31,15 @@ class opnfv::ntp(
$file='/etc/ntp.conf'
) {
+ case $::operatingsystem {
+ centos, redhat: {
+ $service_name = 'ntpd'
+ }
+ debian, ubuntu: {
+ $service_name = 'ntp'
+ }
+ }
+
if $::fuel_settings['role'] {
if ($::fuel_settings['opnfv'] and
$::fuel_settings['opnfv']['ntp']) {
@@ -63,9 +72,9 @@ class opnfv::ntp(
service { 'ntp':
ensure => running,
+ name => $service_name,
enable => true,
require => [ Package['ntp'], File[$file]]
}
}
}
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
index 922ab41..7370169 100644
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
+++ b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
@@ -1,44 +1,45 @@
class opnfv::odl_docker
{
- case $::fuel_settings['role'] {
- /controller/: {
-
- file { "/opt":
- ensure => "directory",
- }
-
- file { "/opt/opnfv":
- ensure => "directory",
- owner => "root",
- group => "root",
- mode => 777,
- }
-
- file { "/opt/opnfv/odl":
- ensure => "directory",
- }
-
- file { "/opt/opnfv/odl/odl_docker_image.tar":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/docker-latest":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/docker-latest",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/start_odl_conatiner.sh":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh",
- mode => 750,
- }
-
- # fix failed to find the cgroup root issue
- # https://github.com/docker/docker/issues/8791
- if $::operatingsystem == 'Ubuntu' {
+ case $::fuel_settings['role'] {
+ /controller/: {
+
+ file { '/opt':
+ ensure => 'directory',
+ }
+
+ file { '/opt/opnfv':
+ ensure => 'directory',
+ owner => 'root',
+ group => 'root',
+ mode => 777,
+ }
+
+ file { '/opt/opnfv/odl':
+ ensure => 'directory',
+ }
+
+ file { '/opt/opnfv/odl/odl_docker_image.tar':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar',
+ mode => 750,
+ }
+
+ file { '/opt/opnfv/odl/docker-latest':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/odl_docker/docker-latest',
+ mode => 750,
+ }
+
+ file { '/opt/opnfv/odl/start_odl_conatiner.sh':
+ ensure => present,
+ source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
+ mode => 750,
+ }
+
+ # fix failed to find the cgroup root issue
+ # https://github.com/docker/docker/issues/8791
+ case $::operatingsystem {
+ 'ubuntu': {
package {'cgroup-lite':
ensure => present,
}
@@ -49,6 +50,12 @@ class opnfv::odl_docker
require => Package['cgroup-lite'],
}
}
+ 'centos': {
+ package {'docker-io':
+ ensure => latest,
+ }
+ }
+ }
+ }
}
- }
}
diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
index be4e67d..44f36a2 100644
--- a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
+++ b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
@@ -48,16 +48,26 @@ class opnfv::resolver()
mode => '0644',
content => template('opnfv/resolv.conf.erb'),
}
-# /etc/resolv.conf is re-generated at each boot by resolvconf, so we
-# need to store there as well.
- file { '/etc/resolvconf/resolv.conf.d/head':
+
+ # /etc/resolv.conf is re-generated at each boot by resolvconf, so we
+ # need to store there as well.
+
+ case $::operatingsystem {
+ 'ubuntu': {
+ file { '/etc/resolvconf/resolv.conf.d/head':
owner => root,
group => root,
mode => '0644',
content => template('opnfv/resolv.conf.erb'),
+ }
+ }
+ 'centos': {
+ exec { 'for file in ifcfg-eth*; do grep -q -F "PEERDNS=" $file || echo "PEERDNS=no" >> $file; done ':
+ provider => 'shell',
+ cwd => '/etc/sysconfig/network-scripts',
+ }
+ }
}
}
}
}
-
-
diff --git a/fuel/prototypes/libvirt/README.rst b/fuel/deploy/README.rst
index e0ceb6f..f7b5711 100644
--- a/fuel/prototypes/libvirt/README.rst
+++ b/fuel/deploy/README.rst
@@ -15,18 +15,22 @@ instead.
Pre-condition 1: The host needs to be Ubuntu 14.x
Pre-condition 2: Necessary packages installed by running
-genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh
+sudo genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh
Pre-condition 3: Example VM configuration deployed by running
genesis/fuel/prototypes/libvirt/setup_vms/apply_setup.sh The VMs and
networks to be setup are in genesis/fuel/prototypes/libvirt/examples:
"vms" and "networks"
+sudo mkdir /mnt/images
+cd setup_vms
+sudo ./apply_setup.sh /mnt/images 50
In order to run the automated install, it's just a matter of running
genesis/fuel/prototypes/libvirt/deploy.sh <isofile> [<deafile>] The
deafile will be optional, if not specified the example one in
genesis/fuel/prototypes/libvirt/examples/libvirt_dea.yaml will be
used.
+sudo ./deploy.sh ~/ISO/opnfv-P0000.iso ~/DEPLOY/deploy/dea.yaml
Now either this will succeed (return code 0) or fail. I'll have a
three hours safety catch to kill off things if something is hanging,
diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/common.py b/fuel/deploy/cloud_deploy/cloud/common.py
new file mode 100644
index 0000000..365f6fb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/common.py
@@ -0,0 +1,51 @@
+import subprocess
+import sys
+import os
+import logging
+
+N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
+ 'roles': 6, 'pending_roles': 7, 'online': 8}
+E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4,
+ 'changes': 5, 'pending_release_id': 6}
+R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4}
+RO = {'name': 0, 'conflicts': 1}
+
+LOG = logging.getLogger(__name__)
+LOG.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(message)s')
+out_handler = logging.StreamHandler(sys.stdout)
+out_handler.setFormatter(formatter)
+LOG.addHandler(out_handler)
+out_handler = logging.FileHandler('autodeploy.log', mode='w')
+out_handler.setFormatter(formatter)
+LOG.addHandler(out_handler)
+
+def exec_cmd(cmd):
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ return process.communicate()[0], process.returncode
+
+def run_proc(cmd):
+ process = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True)
+ return process
+
+def parse(printout, *args):
+ parsed_list = []
+ lines = printout[0].splitlines()
+ for l in lines[2:]:
+ parsed = [e.strip() for e in l.split('|')]
+ parsed_list.append(parsed)
+ return parsed_list
+
+def err(error_message):
+ LOG.error(error_message)
+ sys.exit(1)
+
+def check_file_exists(file_path):
+ if not os.path.isfile(file_path):
+ err('ERROR: File %s not found\n' % file_path)
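Usage sketch (illustration only, not part of the patch): the helper module above is consumed by the other cloud_deploy modules by piping Fuel CLI output through exec_cmd() and parse(), then indexing the parsed rows with the column maps N, E, R and RO. A minimal sketch, assuming it runs on the Fuel master where the fuel CLI is available:

    import common

    # exec_cmd() returns (stdout, returncode); parse() drops the two header
    # rows of the table printed by 'fuel ... list' and splits each row on '|'.
    node_list = common.parse(common.exec_cmd('fuel node list'))

    for node in node_list:
        # Column positions come from the N index map defined in common.py.
        if node[common.N['status']] == 'discover':
            common.LOG.debug('Discovered node %s (MAC %s)\n'
                             % (node[common.N['id']], node[common.N['mac']]))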
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_environment.py b/fuel/deploy/cloud_deploy/cloud/configure_environment.py
new file mode 100644
index 0000000..426bbd1
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_environment.py
@@ -0,0 +1,74 @@
+import common
+import os
+import shutil
+
+from configure_settings import ConfigureSettings
+from configure_network import ConfigureNetwork
+from configure_nodes import ConfigureNodes
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+LOG = common.LOG
+
+class ConfigureEnvironment(object):
+
+ def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict):
+ self.env_id = None
+ self.dea = dea
+ self.yaml_config_dir = yaml_config_dir
+ self.env_name = dea.get_environment_name()
+ self.release_id = release_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.required_networks = []
+
+ def env_exists(self, env_name):
+ env_list = parse(exec_cmd('fuel env --list'))
+ for env in env_list:
+ if env[E['name']] == env_name and env[E['status']] == 'new':
+ self.env_id = env[E['id']]
+ return True
+ return False
+
+ def configure_environment(self):
+ LOG.debug('Configure environment\n')
+ if os.path.exists(self.yaml_config_dir):
+ LOG.debug('Deleting existing config directory %s\n'
+ % self.yaml_config_dir)
+ shutil.rmtree(self.yaml_config_dir)
+ LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir)
+ os.makedirs(self.yaml_config_dir)
+
+ LOG.debug('Creating environment %s release %s, mode ha, network-mode '
+ 'neutron, net-segment-type vlan\n'
+ % (self.env_name, self.release_id))
+ exec_cmd('fuel env create --name %s --release %s --mode ha '
+ '--network-mode neutron --net-segment-type vlan'
+ % (self.env_name, self.release_id))
+
+ if not self.env_exists(self.env_name):
+ err("Failed to create environment %s\n" % self.env_name)
+ self.config_settings()
+ self.config_network()
+ self.config_nodes()
+
+ def config_settings(self):
+ settings = ConfigureSettings(self.yaml_config_dir, self.env_id,
+ self.dea)
+ settings.config_settings()
+
+ def config_network(self):
+ network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea)
+ network.config_network()
+
+ def config_nodes(self):
+ nodes = ConfigureNodes(self.yaml_config_dir, self.env_id,
+ self.node_id_roles_dict, self.dea)
+ nodes.config_nodes()
+
+
+
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_network.py b/fuel/deploy/cloud_deploy/cloud/configure_network.py
new file mode 100644
index 0000000..f4d6f87
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_network.py
@@ -0,0 +1,62 @@
+import common
+import yaml
+import io
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class ConfigureNetwork(object):
+
+ def __init__(self, yaml_config_dir, env_id, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.dea = dea
+ self.required_networks = []
+
+ def download_network_config(self):
+ LOG.debug('Download network config for environment %s\n' % self.env_id)
+ exec_cmd('fuel network --env %s --download --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_network_config(self):
+ LOG.debug('Upload network config for environment %s\n' % self.env_id)
+ exec_cmd('fuel network --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def config_network(self):
+ LOG.debug('Configure network\n')
+ self.download_network_config()
+ self.modify_network_config()
+ self.upload_network_config()
+
+ def modify_network_config(self):
+ LOG.debug('Modify network config for environment %s\n' % self.env_id)
+ network_yaml = (self.yaml_config_dir + '/network_%s.yaml'
+ % self.env_id)
+ check_file_exists(network_yaml)
+
+ network_config = self.dea.get_networks()
+
+
+ with io.open(network_yaml) as stream:
+ network = yaml.load(stream)
+
+ net_names = self.dea.get_network_names()
+ net_id = {}
+ for net in network['networks']:
+ if net['name'] in net_names:
+ net_id[net['name']] = {'id': net['id'],
+ 'group_id': net['group_id']}
+
+ for network in network_config['networks']:
+ network.update(net_id[network['name']])
+
+ with io.open(network_yaml, 'w') as stream:
+ yaml.dump(network_config, stream, default_flow_style=False)
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py
new file mode 100644
index 0000000..a5e24a8
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py
@@ -0,0 +1,108 @@
+import common
+import yaml
+import io
+import glob
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+
+class ConfigureNodes(object):
+
+ def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.dea = dea
+
+ def config_nodes(self):
+ LOG.debug('Configure nodes\n')
+ for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
+ exec_cmd('fuel node set --node-id %s --role %s --env %s'
+ % (node_id, ','.join(roles_shelf_blade[0]), self.env_id))
+
+ self.download_deployment_config()
+ self.modify_node_network_schemes()
+ self.upload_deployment_config()
+
+ for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
+ self.download_interface_config(node_id)
+ self.modify_node_interface(node_id)
+ self.upload_interface_config(node_id)
+
+ def modify_node_network_schemes(self):
+ LOG.debug('Modify node network schemes in environment %s\n' % self.env_id)
+ for node_file in glob.glob('%s/deployment_%s/*.yaml'
+ % (self.yaml_config_dir, self.env_id)):
+ check_file_exists(node_file)
+
+ if 'compute' in node_file:
+ node_type = 'compute'
+ else:
+ node_type = 'controller'
+
+ network_scheme = self.dea.get_network_scheme(node_type)
+
+ with io.open(node_file) as stream:
+ node = yaml.load(stream)
+
+ node['network_scheme']['transformations'] = network_scheme
+
+ with io.open(node_file, 'w') as stream:
+ yaml.dump(node, stream, default_flow_style=False)
+
+
+ def download_deployment_config(self):
+ LOG.debug('Download deployment config for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel deployment --env %s --default --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_deployment_config(self):
+ LOG.debug('Upload deployment config for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel deployment --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def download_interface_config(self, node_id):
+ LOG.debug('Download interface config for node %s\n' % node_id)
+ r, c = exec_cmd('fuel node --env %s --node %s --network --download '
+ '--dir %s' % (self.env_id, node_id,
+ self.yaml_config_dir))
+
+ def upload_interface_config(self, node_id):
+ LOG.debug('Upload interface config for node %s\n' % node_id)
+ r, c = exec_cmd('fuel node --env %s --node %s --network --upload '
+ '--dir %s' % (self.env_id, node_id,
+ self.yaml_config_dir))
+
+ def modify_node_interface(self, node_id):
+ LOG.debug('Modify interface config for node %s\n' % node_id)
+ interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml'
+ % node_id)
+
+ with io.open(interface_yaml) as stream:
+ interfaces = yaml.load(stream)
+
+ net_name_id = {}
+ for interface in interfaces:
+ for network in interface['assigned_networks']:
+ net_name_id[network['name']] = network['id']
+
+ interface_config = self.dea.get_interfaces()
+
+ for interface in interfaces:
+ interface['assigned_networks'] = []
+ for net_name in interface_config[interface['name']]:
+ net = {}
+ net['id'] = net_name_id[net_name]
+ net['name'] = net_name
+ interface['assigned_networks'].append(net)
+
+ with io.open(interface_yaml, 'w') as stream:
+ yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_settings.py b/fuel/deploy/cloud_deploy/cloud/configure_settings.py
new file mode 100644
index 0000000..3a3e4d5
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/configure_settings.py
@@ -0,0 +1,47 @@
+import common
+import yaml
+import io
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class ConfigureSettings(object):
+
+ def __init__(self, yaml_config_dir, env_id, dea):
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.dea = dea
+
+ def download_settings(self):
+ LOG.debug('Download settings for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel settings --env %s --download --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def upload_settings(self):
+ LOG.debug('Upload settings for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel settings --env %s --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+
+ def config_settings(self):
+ LOG.debug('Configure settings\n')
+ self.download_settings()
+ self.modify_settings()
+ self.upload_settings()
+
+ def modify_settings(self):
+ LOG.debug('Modify settings for environment %s\n' % self.env_id)
+ settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml'
+ % self.env_id)
+ check_file_exists(settings_yaml)
+
+ settings = self.dea.get_settings()
+
+ with io.open(settings_yaml, 'w') as stream:
+ yaml.dump(settings, stream, default_flow_style=False)
diff --git a/fuel/deploy/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py
index 5f306a2..295636a 100644
--- a/fuel/deploy/dea.py
+++ b/fuel/deploy/cloud_deploy/cloud/dea.py
@@ -7,13 +7,15 @@ class DeploymentEnvironmentAdapter(object):
self.blade_ids_per_shelves = {}
self.blades_per_shelves = {}
self.shelf_ids = []
- self.networks = {}
+ self.info_per_shelves = {}
+ self.network_names = []
def parse_yaml(self, yaml_path):
with io.open(yaml_path) as yaml_file:
self.dea_struct = yaml.load(yaml_file)
self.collect_shelf_and_blade_info()
- self.collect_network_info()
+ self.collect_shelf_info()
+ self.collect_network_names()
def get_no_of_blades(self):
no_of_blades = 0
@@ -21,14 +23,16 @@ class DeploymentEnvironmentAdapter(object):
no_of_blades += len(shelf['blade'])
return no_of_blades
- def get_server_type(self):
- return self.dea_struct['server']['type']
+ def collect_shelf_info(self):
+ self.info_per_shelves = {}
+ for shelf in self.dea_struct['shelf']:
+ self.info_per_shelves[shelf['id']] = shelf
- def get_server_info(self):
- return (self.dea_struct['server']['type'],
- self.dea_struct['server']['mgmt_ip'],
- self.dea_struct['server']['username'],
- self.dea_struct['server']['password'])
+ def get_shelf_info(self, shelf):
+ return (self.info_per_shelves[shelf]['type'],
+ self.info_per_shelves[shelf]['mgmt_ip'],
+ self.info_per_shelves[shelf]['username'],
+ self.info_per_shelves[shelf]['password'])
def get_environment_name(self):
return self.dea_struct['name']
@@ -54,19 +58,29 @@ class DeploymentEnvironmentAdapter(object):
blade_ids.append(blade['id'])
blades[blade['id']] = blade
- def is_controller(self, shelf_id, blade_id):
- blade = self.blades[shelf_id][blade_id]
- return (True if 'role' in blade and blade['role'] == 'controller'
+ def has_role(self, role, shelf, blade):
+ blade = self.blades_per_shelves[shelf][blade]
+ if role == 'compute':
+ return True if 'roles' not in blade else False
+ return (True if 'roles' in blade and role in blade['roles']
else False)
- def is_compute_host(self, shelf_id, blade_id):
- blade = self.blades[shelf_id][blade_id]
- return True if 'role' not in blade else False
-
- def collect_network_info(self):
- self.networks = {}
- for network in self.dea_struct['network']:
- self.networks[network['name']] = network
+ def collect_network_names(self):
+ self.network_names = []
+ for network in self.dea_struct['networks']['networks']:
+ self.network_names.append(network['name'])
def get_networks(self):
- return self.networks
\ No newline at end of file
+ return self.dea_struct['networks']
+
+ def get_network_names(self):
+ return self.network_names
+
+ def get_settings(self):
+ return self.dea_struct['settings']
+
+ def get_network_scheme(self, node_type):
+ return self.dea_struct[node_type]
+
+ def get_interfaces(self):
+ return self.dea_struct['interfaces']
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py
new file mode 100644
index 0000000..ea33f8b
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/deploy.py
@@ -0,0 +1,208 @@
+import time
+import yaml
+import io
+import os
+
+import common
+from dea import DeploymentEnvironmentAdapter
+from configure_environment import ConfigureEnvironment
+from deployment import Deployment
+
+SUPPORTED_RELEASE = 'Juno on CentOS 6.5'
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class Deploy(object):
+
+ def __init__(self, yaml_config_dir):
+ self.supported_release = None
+ self.yaml_config_dir = yaml_config_dir
+ self.macs_per_shelf_dict = {}
+ self.node_ids_dict = {}
+ self.node_id_roles_dict = {}
+ self.env_id = None
+ self.shelf_blades_dict = {}
+
+ def cleanup_fuel_environments(self, env_list):
+ WAIT_LOOP = 60
+ SLEEP_TIME = 10
+ for env in env_list:
+ LOG.debug('Deleting environment %s\n' % env[E['id']])
+ exec_cmd('fuel env --env %s --delete' % env[E['id']])
+ all_env_erased = False
+ for i in range(WAIT_LOOP):
+ env_list = parse(exec_cmd('fuel env list'))
+ if env_list[0][0]:
+ time.sleep(SLEEP_TIME)
+ else:
+ all_env_erased = True
+ break
+ if not all_env_erased:
+ err('Could not erase these environments %s'
+ % [(env[E['id']], env[E['status']]) for env in env_list])
+
+ def cleanup_fuel_nodes(self, node_list):
+ for node in node_list:
+ if node[N['status']] == 'discover':
+ LOG.debug('Deleting node %s\n' % node[N['id']])
+ exec_cmd('fuel node --node-id %s --delete-from-db'
+ % node[N['id']])
+ exec_cmd('cobbler system remove --name node-%s'
+ % node[N['id']])
+
+ def check_previous_installation(self):
+ LOG.debug('Check previous installation\n')
+ env_list = parse(exec_cmd('fuel env list'))
+ if env_list[0][0]:
+ self.cleanup_fuel_environments(env_list)
+ node_list = parse(exec_cmd('fuel node list'))
+ if node_list[0][0]:
+ self.cleanup_fuel_nodes(node_list)
+
+ def check_supported_release(self):
+ LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE)
+ release_list = parse(exec_cmd('fuel release -l'))
+ for release in release_list:
+ if release[R['name']] == SUPPORTED_RELEASE:
+ self.supported_release = release
+ break
+ if not self.supported_release:
+ err('This Fuel does not contain the following '
+ 'release: %s\n' % SUPPORTED_RELEASE)
+
+ def check_prerequisites(self):
+ LOG.debug('Check prerequisites\n')
+ self.check_supported_release()
+ self.check_previous_installation()
+
+ def find_mac_in_dict(self, mac):
+ for shelf, blade_dict in self.macs_per_shelf_dict.iteritems():
+ for blade, mac_list in blade_dict.iteritems():
+ if mac in mac_list:
+ return shelf, blade
+
+ def all_blades_discovered(self):
+ for shelf, blade_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blade_dict.iteritems():
+ if not node_id:
+ return False
+ return True
+
+ def not_discovered_blades_summary(self):
+ summary = ''
+ for shelf, blade_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blade_dict.iteritems():
+ if not node_id:
+ summary += '[shelf %s, blade %s]\n' % (shelf, blade)
+ return summary
+
+ def collect_blade_ids_per_shelves(self, dea):
+ self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
+
+ def node_discovery(self, node_list, discovered_macs):
+ for node in node_list:
+ if (node[N['status']] == 'discover' and
+ node[N['online']] == 'True' and
+ node[N['mac']] not in discovered_macs):
+ discovered_macs.append(node[N['mac']])
+ shelf_blade = self.find_mac_in_dict(node[N['mac']])
+ if shelf_blade:
+ self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \
+ node[N['id']]
+
+ def discovery_waiting_loop(self, discovered_macs):
+ WAIT_LOOP = 180
+ SLEEP_TIME = 10
+ all_discovered = False
+ for i in range(WAIT_LOOP):
+ node_list = parse(exec_cmd('fuel node list'))
+ if node_list[0][0]:
+ self.node_discovery(node_list, discovered_macs)
+ if self.all_blades_discovered():
+ all_discovered = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ return all_discovered
+
+ def wait_for_discovered_blades(self):
+ LOG.debug('Wait for discovered blades\n')
+ discovered_macs = []
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ self.node_ids_dict[shelf] = {}
+ for blade in blade_list:
+ self.node_ids_dict[shelf][blade] = None
+ all_discovered = self.discovery_waiting_loop(discovered_macs)
+ if not all_discovered:
+ err('Not all blades have been discovered: %s\n'
+ % self.not_discovered_blades_summary())
+
+ def get_mac_addresses(self, macs_yaml):
+ with io.open(macs_yaml, 'r') as stream:
+ self.macs_per_shelf_dict = yaml.load(stream)
+
+ def assign_roles_to_cluster_node_ids(self, dea):
+ self.node_id_roles_dict = {}
+ for shelf, blades_dict in self.node_ids_dict.iteritems():
+ for blade, node_id in blades_dict.iteritems():
+ role_list = []
+ if dea.has_role('controller', shelf, blade):
+ role_list.extend(['controller', 'mongo'])
+ if dea.has_role('cinder', shelf, blade):
+ role_list.extend(['cinder'])
+ elif dea.has_role('compute', shelf, blade):
+ role_list.extend(['compute'])
+ self.node_id_roles_dict[node_id] = (role_list, shelf, blade)
+
+ def configure_environment(self, dea):
+ config_env = ConfigureEnvironment(dea, self.yaml_config_dir,
+ self.supported_release[R['id']],
+ self.node_id_roles_dict)
+ config_env.configure_environment()
+ self.env_id = config_env.env_id
+
+ def deploy(self, dea):
+ dep = Deployment(dea, self.yaml_config_dir, self.env_id,
+ self.node_id_roles_dict)
+ dep.deploy()
+
+
+def main():
+
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ dea_yaml = base_dir + '/dea.yaml'
+ check_file_exists(dea_yaml)
+ macs_yaml = base_dir + '/macs.yaml'
+ check_file_exists(macs_yaml)
+
+ yaml_config_dir = '/var/lib/opnfv/pre_deploy'
+
+ deploy = Deploy(yaml_config_dir)
+ dea = DeploymentEnvironmentAdapter()
+ dea.parse_yaml(dea_yaml)
+
+ deploy.get_mac_addresses(macs_yaml)
+
+ deploy.collect_blade_ids_per_shelves(dea)
+
+ deploy.check_prerequisites()
+
+ deploy.wait_for_discovered_blades()
+
+ deploy.assign_roles_to_cluster_node_ids(dea)
+
+ deploy.configure_environment(dea)
+
+ deploy.deploy(dea)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py b/fuel/deploy/cloud_deploy/cloud/deployment.py
new file mode 100644
index 0000000..831059b
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud/deployment.py
@@ -0,0 +1,100 @@
+import common
+import os
+import shutil
+import glob
+import yaml
+import io
+import time
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+run_proc = common.run_proc
+parse = common.parse
+err = common.err
+LOG = common.LOG
+
+
+class Deployment(object):
+
+ def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
+ self.dea = dea
+ self.env_name = dea.get_environment_name()
+ self.yaml_config_dir = yaml_config_dir
+ self.env_id = env_id
+ self.node_id_roles_dict = node_id_roles_dict
+ self.node_id_list = []
+ for node_id in self.node_id_roles_dict.iterkeys():
+ self.node_id_list.append(node_id)
+ self.node_id_list.sort()
+
+ def download_deployment_info(self):
+ LOG.debug('Download deployment info for environment %s\n' % self.env_id)
+ deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id
+ if os.path.exists(deployment_dir):
+ shutil.rmtree(deployment_dir)
+ r, c = exec_cmd('fuel --env %s deployment --default --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+ if c > 0:
+ err('Error: Could not download deployment info for env %s,'
+ ' reason: %s\n' % (self.env_id, r))
+
+ def upload_deployment_info(self):
+ LOG.debug('Upload deployment info for environment %s\n' % self.env_id)
+ r, c = exec_cmd('fuel --env %s deployment --upload --dir %s'
+ % (self.env_id, self.yaml_config_dir))
+ if c > 0:
+ err('Error: Could not upload deployment info for env %s,'
+ ' reason: %s\n' % (self.env_id, r))
+
+ def pre_deploy(self):
+ LOG.debug('Running pre-deploy on environment %s\n' % self.env_name)
+ self.download_deployment_info()
+ opnfv = {'opnfv': {}}
+
+ for node_file in glob.glob('%s/deployment_%s/*.yaml'
+ % (self.yaml_config_dir, self.env_id)):
+ with io.open(node_file) as stream:
+ node = yaml.load(stream)
+
+ if 'opnfv' not in node:
+ node.update(opnfv)
+
+ with io.open(node_file, 'w') as stream:
+ yaml.dump(node, stream, default_flow_style=False)
+ self.upload_deployment_info()
+
+
+ def deploy(self):
+ WAIT_LOOP = 180
+ SLEEP_TIME = 60
+
+ self.pre_deploy()
+
+ log_file = 'cloud.log'
+
+ LOG.debug('Starting deployment of environment %s\n' % self.env_name)
+ run_proc('fuel --env %s deploy-changes | strings | tee %s'
+ % (self.env_id, log_file))
+
+ ready = False
+ for i in range(WAIT_LOOP):
+ env = parse(exec_cmd('fuel env --env %s' % self.env_id))
+ LOG.debug('Environment status: %s\n' % env[0][E['status']])
+ r, _ = exec_cmd('tail -2 %s | head -1' % log_file)
+ if r:
+ LOG.debug('%s\n' % r)
+ if env[0][E['status']] == 'operational':
+ ready = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ exec_cmd('rm %s' % log_file)
+
+ if ready:
+ LOG.debug('Environment %s successfully deployed\n' % self.env_name)
+ else:
+ err('Deployment failed, environment %s is not operational\n'
+ % self.env_name)
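A note on the timing above (derived from the constants in the code, not part of the patch): Deployment.deploy() polls for at most WAIT_LOOP × SLEEP_TIME = 180 × 60 s, i.e. three hours, for the environment to reach the 'operational' state before the deployment is declared failed, which lines up with the three-hour safety catch mentioned in fuel/deploy/README.rst above. The node discovery loop in deploy.py allows 180 × 10 s, i.e. 30 minutes, and the environment cleanup loop 60 × 10 s, i.e. 10 minutes.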
diff --git a/fuel/deploy/cloud_deploy/cloud_deploy.py b/fuel/deploy/cloud_deploy/cloud_deploy.py
new file mode 100644
index 0000000..4197519
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/cloud_deploy.py
@@ -0,0 +1,117 @@
+import os
+import io
+import yaml
+
+from cloud import common
+from cloud.dea import DeploymentEnvironmentAdapter
+from hardware_adapters.dha import DeploymentHardwareAdapter
+from ssh_client import SSHClient
+
+exec_cmd = common.exec_cmd
+err = common.err
+check_file_exists = common.check_file_exists
+LOG = common.LOG
+
+class CloudDeploy(object):
+
+ def __init__(self, fuel_ip, fuel_username, fuel_password):
+ self.fuel_ip = fuel_ip
+ self.fuel_username = fuel_username
+ self.fuel_password = fuel_password
+ self.shelf_blades_dict = {}
+ self.macs_per_shelf_dict = {}
+
+ def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'):
+ if dir_path:
+ path = '-r ' + dir_path
+ elif file_path:
+ path = file_path
+ LOG.debug('Copying %s to Fuel Master %s' % (path, target))
+ if path:
+ exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null'
+ ' -o StrictHostKeyChecking=no -o ConnectTimeout=15'
+ ' %s %s@%s:%s'
+ % (self.fuel_password, path, self.fuel_username,
+ self.fuel_ip, target))
+
+ def run_cloud_deploy(self, deploy_dir, deploy_app):
+ LOG.debug('START CLOUD DEPLOYMENT')
+ ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password)
+ ssh.open()
+ ssh.run('python %s/%s' % (deploy_dir, deploy_app))
+ ssh.close()
+
+ def power_off_blades(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.power_off_blades(shelf, blade_list)
+
+ def power_on_blades(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.power_on_blades(shelf, blade_list)
+
+ def set_boot_order(self, dea):
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ dha.set_boot_order_blades(shelf, blade_list)
+
+ def get_mac_addresses(self, dea, macs_yaml):
+ self.macs_per_shelf_dict = {}
+ for shelf, blade_list in self.shelf_blades_dict.iteritems():
+ type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+ dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
+ self.macs_per_shelf_dict[shelf] = dha.get_blades_mac_addresses(
+ shelf, blade_list)
+
+ with io.open(macs_yaml, 'w') as stream:
+ yaml.dump(self.macs_per_shelf_dict, stream,
+ default_flow_style=False)
+
+ def collect_blade_ids_per_shelves(self, dea):
+ self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
+
+
+
+def main():
+
+ fuel_ip = '10.20.0.2'
+ fuel_username = 'root'
+ fuel_password = 'r00tme'
+ deploy_dir = '~/cloud'
+
+ cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password)
+
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ deployment_dir = base_dir + '/cloud'
+ macs_yaml = base_dir + '/macs.yaml'
+ dea_yaml = base_dir + '/dea.yaml'
+ check_file_exists(dea_yaml)
+
+ cloud.copy_to_fuel_master(dir_path=deployment_dir)
+ cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir)
+
+ dea = DeploymentEnvironmentAdapter()
+ dea.parse_yaml(dea_yaml)
+
+ cloud.collect_blade_ids_per_shelves(dea)
+
+ cloud.power_off_blades(dea)
+
+ cloud.set_boot_order(dea)
+
+ cloud.power_on_blades(dea)
+
+ cloud.get_mac_addresses(dea, macs_yaml)
+ check_file_exists(dea_yaml)
+
+ cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir)
+
+ cloud.run_cloud_deploy(deploy_dir, 'deploy.py')
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
index 87ac6e2..2764aeb 100644
--- a/fuel/deploy/dha.py
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
@@ -1,4 +1,5 @@
-from hardware_adapters.hp.hp_adapter import HpAdapter
+from hp.hp_adapter import HpAdapter
+from libvirt.libvirt_adapter import LibvirtAdapter
class DeploymentHardwareAdapter(object):
def __new__(cls, server_type, *args):
@@ -55,8 +56,6 @@ class EsxiAdapter(HardwareAdapter):
def get_blade_mac_addresses(self, shelf, blade):
return self.environment[shelf][blade]['mac']
-class LibvirtAdapter(HardwareAdapter):
- pass
class DellAdapter(HardwareAdapter):
pass
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
new file mode 100644
index 0000000..930d234
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
@@ -0,0 +1,288 @@
+import re
+import time
+from netaddr import EUI, mac_unix
+from cloud import common
+from ssh_client import SSHClient
+
+LOG = common.LOG
+err = common.err
+
+S = {'bay': 0, 'ilo_name': 1, 'ilo_ip': 2, 'status': 3, 'power': 4,
+ 'uid_partner': 5}
+
+class HpAdapter(object):
+
+ def __init__(self, mgmt_ip, username, password):
+ self.mgmt_ip = mgmt_ip
+ self.username = username
+ self.password = password
+
+ class mac_dhcp(mac_unix):
+ word_fmt = '%.2x'
+
+ def next_ip(self):
+ digit_list = self.mgmt_ip.split('.')
+ digit_list[3] = str(int(digit_list[3]) + 1)
+ self.mgmt_ip = '.'.join(digit_list)
+
+ def connect(self):
+ verified_ips = [self.mgmt_ip]
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ try:
+ ssh.open()
+ except Exception:
+ self.next_ip()
+ verified_ips.append(self.mgmt_ip)
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ try:
+ ssh.open()
+ except Exception as e:
+ err('Could not connect to HP Onboard Administrator through '
+ 'these IPs: %s, reason: %s' % (verified_ips, e))
+
+ lines = self.clean_lines(ssh.execute('show oa status'))
+ for line in lines:
+ if 'Role: Standby' in line:
+ ssh.close()
+ if self.mgmt_ip != verified_ips[0]:
+ err('Can only talk to OA %s which is the standby OA\n'
+ % self.mgmt_ip)
+ else:
+ LOG.debug('%s is the standby OA, trying next OA\n'
+ % self.mgmt_ip)
+ self.next_ip()
+ verified_ips.append(self.mgmt_ip)
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ try:
+ ssh.open()
+ except Exception as e:
+ err('Could not connect to HP Onboard Administrator'
+ ' through these IPs: %s, reason: %s'
+ % (verified_ips, e))
+
+ elif 'Role: Active' in line:
+ return ssh
+ err('Could not reach Active OA through these IPs %s' % verified_ips)
+
+ def get_blades_mac_addresses(self, shelf, blade_list):
+ macs_per_blade_dict = {}
+ LOG.debug('Getting MAC addresses for shelf %s, blades %s'
+ % (shelf, blade_list))
+ ssh = self.connect()
+ for blade in blade_list:
+ lines = self.clean_lines(
+ ssh.execute('show server info %s' % blade))
+ left, right = self.find_mac(lines, shelf, blade)
+
+ left = EUI(left, dialect=self.mac_dhcp)
+ right = EUI(right, dialect=self.mac_dhcp)
+ macs_per_blade_dict[blade] = [str(left), str(right)]
+ ssh.close()
+ return macs_per_blade_dict
+
+ def find_mac(self, printout, shelf, blade):
+ left = False
+ right = False
+ for line in printout:
+ if ('No Server Blade Installed' in line or
+ 'Invalid Arguments' in line):
+ err('Blade %d in shelf %d does not exist' % (blade, shelf))
+
+ seobj = re.search(r'LOM1:1-a\s+([0-9A-F:]+)', line, re.I)
+ if seobj:
+ left = seobj.group(1)
+ else:
+ seobj = re.search(r'LOM1:2-a\s+([0-9A-F:]+)', line, re.I)
+ if seobj:
+ right = seobj.group(1)
+ if left and right:
+ return left, right
+
+ def get_hardware_info(self, shelf, blade=None):
+ ssh = self.connect()
+ if ssh and not blade:
+ ssh.close()
+ return 'HP'
+
+ lines = self.clean_lines(ssh.execute('show server info %s' % blade))
+ ssh.close()
+
+ match = r'Product Name:\s+(.+)\Z'
+ if not re.search(match, str(lines[:])):
+ LOG.debug('Blade %s in shelf %s does not exist\n' % (blade, shelf))
+ return False
+
+ for line in lines:
+ seobj = re.search(match, line)
+ if seobj:
+ return 'HP %s' % seobj.group(1)
+ return False
+
+ def power_off_blades(self, shelf, blade_list):
+ return self.set_state(shelf, 'locked', blade_list)
+
+ def power_on_blades(self, shelf, blade_list):
+ return self.set_state(shelf, 'unlocked', blade_list)
+
+ def set_boot_order_blades(self, shelf, blade_list):
+ return self.set_boot_order(shelf, blade_list=blade_list)
+
+ def parse(self, lines):
+ parsed_list = []
+ for l in lines[5:-2]:
+ parsed = []
+ cluttered = [e.strip() for e in l.split(' ')]
+ for p in cluttered:
+ if p:
+ parsed.append(p)
+ parsed_list.append(parsed)
+ return parsed_list
+
+ def set_state(self, shelf, state, blade_list):
+ if state not in ['locked', 'unlocked']:
+ LOG.debug('Incorrect state: %s' % state)
+ return None
+
+ LOG.debug('Setting state %s for blades %s in shelf %s'
+ % (state, blade_list, shelf))
+
+ blade_list = sorted(blade_list)
+ ssh = self.connect()
+
+ LOG.debug('Check if blades are present')
+ server_list = self.parse(
+ self.clean_lines(ssh.execute('show server list')))
+
+ for blade in blade_list:
+ if server_list[S['status']] == 'Absent':
+ LOG.debug('Blade %s in shelf %s is missing. '
+ 'Set state %s not performed\n'
+ % (blade, shelf, state))
+ blade_list.remove(blade)
+
+ bladelist = ','.join(blade_list)
+
+ # Use leading upper case on On/Off so it can be reused in match
+ force = ''
+ if state == 'locked':
+ powerstate = 'Off'
+ force = 'force'
+ else:
+ powerstate = 'On'
+ cmd = 'power%s server %s' % (powerstate, bladelist)
+ if force:
+ cmd += ' %s' % force
+
+ LOG.debug(cmd)
+ ssh.execute(cmd)
+
+ # Check that all blades reach the state which can take some time,
+ # so re-try a couple of times
+ LOG.debug('Check if state %s successfully set' % state)
+
+ WAIT_LOOP = 2
+ SLEEP_TIME = 3
+
+ set_blades = []
+
+ for i in range(WAIT_LOOP):
+ server_list = self.parse(
+ self.clean_lines(ssh.execute('show server list')))
+
+ for blade in blade_list:
+ for server in server_list:
+ if (server[S['bay']] == blade and
+ server[S['power']] == powerstate):
+ set_blades.append(blade)
+ break
+
+ all_set = set(blade_list) == set(set_blades)
+ if all_set:
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+
+ ssh.close()
+
+ if all_set:
+ LOG.debug('State %s successfully set on blades %s in shelf %d'
+ % (state, set_blades, shelf))
+ return True
+ else:
+ LOG.debug('Could not set state %s on blades %s in shelf %s\n'
+ % (state, set(blade_list) - set(set_blades), shelf))
+ return False
+
+
+ def clean_lines(self, printout):
+ lines = []
+ for p in [l.strip() for l in printout.splitlines()]:
+ if p:
+ lines.append(p)
+ return lines
+
+
+ def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
+
+ boot_dict = {'Hard Drive': 'hdd',
+ 'PXE NIC': 'pxe',
+ 'CD-ROM': 'cd',
+ 'USB': 'usb',
+ 'Diskette Driver': 'disk'}
+
+ boot_options = [b for b in boot_dict.itervalues()]
+ diff = list(set(boot_dev_list) - set(boot_options))
+ if diff:
+ err('The following boot options %s are not valid' % diff)
+
+ blade_list = sorted(blade_list)
+ LOG.debug('Setting boot order %s for blades %s in shelf %s'
+ % (boot_dev_list, blade_list, shelf))
+
+ ssh = self.connect()
+
+ LOG.debug('Check if blades are present')
+ server_list = self.parse(
+ self.clean_lines(ssh.execute('show server list')))
+
+ for blade in blade_list:
+ if server_list[S['status']] == 'Absent':
+ LOG.debug('Blade %s in shelf %s is missing. '
+ 'Change boot order %s not performed.\n'
+ % (blade, shelf, boot_dev_list))
+ blade_list.remove(blade)
+
+ bladelist = ','.join(blade_list)
+
+ for boot_dev in reversed(boot_dev_list):
+ ssh.execute('set server boot first %s %s' % (boot_dev, bladelist))
+
+ LOG.debug('Check if boot order is successfully set')
+
+ success_list = []
+ boot_keys = [b for b in boot_dict.iterkeys()]
+ for blade in blade_list:
+ lines = self.clean_lines(ssh.execute('show server boot %s'
+ % blade))
+ boot_order = lines[lines.index('IPL Devices (Boot Order):')+1:]
+ boot_list = []
+ success = False
+ for b in boot_order:
+ for k in boot_keys:
+ if k in b:
+ boot_list.append(boot_dict[k])
+ break
+ if boot_list == boot_dev_list:
+ success = True
+ break
+
+ success_list.append(success)
+ if success:
+ LOG.debug('Boot order %s successfully set on blade %s in '
+ 'shelf %s\n' % (boot_dev_list, blade, shelf))
+ else:
+ LOG.debug('Failed to set boot order %s on blade %s in '
+ 'shelf %s\n' % (boot_dev_list, blade, shelf))
+
+ ssh.close()
+ return all(success_list)
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
@@ -0,0 +1 @@
+__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
new file mode 100644
index 0000000..d332e59
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
@@ -0,0 +1,153 @@
+from lxml import etree
+from cloud import common
+from ssh_client import SSHClient
+
+exec_cmd = common.exec_cmd
+err = common.err
+LOG = common.LOG
+
+
+class LibvirtAdapter(object):
+
+ def __init__(self, mgmt_ip, username, password):
+ self.mgmt_ip = mgmt_ip
+ self.username = username
+ self.password = password
+ self.parser = etree.XMLParser(remove_blank_text=True)
+
+ def power_off_blades(self, shelf, blade_list):
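+ # 'virsh destroy' forces an immediate, ungraceful power-off of the
+ # domain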
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ LOG.debug('Power off blade %s in shelf %s' % (blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh destroy %s' % vm_name)
+ LOG.debug('response: %s' % resp)
+ ssh.close()
+
+ def power_on_blades(self, shelf, blade_list):
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ LOG.debug('Power on blade %s in shelf %s' % (blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh start %s' % vm_name)
+ LOG.debug('response: %s' % resp)
+ ssh.close()
+
+ def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
+ if not boot_dev_list:
+ boot_dev_list = ['network', 'hd']
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ temp_dir = ssh.execute('mktemp -d').strip()
+ for blade in blade_list:
+ LOG.debug('Set boot order %s on blade %s in shelf %s'
+ % (boot_dev_list, blade, shelf))
+ vm_name = 's%s_b%s' % (shelf, blade)
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp, self.parser)
+ os = xml_dump.xpath('/domain/os')
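+ # Drop any existing <boot>/<bootmenu> elements, then add the requested
+ # boot devices in order and disable the boot menu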
+ for o in os:
+ for bootelem in ['boot', 'bootmenu']:
+ boot = o.xpath(bootelem)
+ for b in boot:
+ b.getparent().remove(b)
+ for dev in boot_dev_list:
+ b = etree.Element('boot')
+ b.set('dev', dev)
+ o.append(b)
+ bmenu = etree.Element('bootmenu')
+ bmenu.set('enable', 'no')
+ o.append(bmenu)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
+
+ def get_blades_mac_addresses(self, shelf, blade_list):
+ LOG.debug('Get the MAC addresses of blades %s in shelf %s'
+ % (blade_list, shelf))
+ macs_per_blade_dict = {}
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ for blade in blade_list:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ mac_list = macs_per_blade_dict[blade] = []
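+ # Collect the MAC address of every interface defined in the domain XML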
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+ interfaces = xml_dump.xpath('/domain/devices/interface')
+ for interface in interfaces:
+ macs = interface.xpath('mac')
+ for mac in macs:
+ mac_list.append(mac.get('address'))
+ ssh.close()
+ return macs_per_blade_dict
+
+ def load_image_file(self, shelf=None, blade=None, vm=None,
+ image_path=None):
+ if shelf and blade:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ else:
+ vm_name = vm
+
+ LOG.debug('Load media file %s into %s '
+ % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s'
+ % (shelf, blade)))
+
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ temp_dir = ssh.execute('mktemp -d').strip()
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+
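+ # Turn the cdrom device into a file-backed disk pointing at the image,
+ # then re-define the domain so the change persists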
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ if disk.get('device') == 'cdrom':
+ disk.set('type', 'file')
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ source = etree.SubElement(disk, 'source')
+ source.set('file', image_path)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
+
+ def eject_image_file(self, shelf=None, blade=None, vm=None):
+ if shelf and blade:
+ vm_name = 's%s_b%s' % (shelf, blade)
+ else:
+ vm_name = vm
+
+ LOG.debug('Eject media file from %s '
+ % ('vm %s' % vm if vm else 'blade %s in shelf %s'
+ % (shelf, blade)))
+
+ ssh = SSHClient(self.mgmt_ip, self.username, self.password)
+ ssh.open()
+ temp_dir = ssh.execute('mktemp -d').strip()
+ resp = ssh.execute('virsh dumpxml %s' % vm_name)
+ xml_dump = etree.fromstring(resp)
+
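+ # Revert the cdrom device to an empty block device by removing its
+ # <source> element, then re-define the domain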
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ if disk.get('device') == 'cdrom':
+ disk.set('type', 'block')
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ tree = etree.ElementTree(xml_dump)
+ xml_file = temp_dir + '/%s.xml' % vm_name
+ with open(xml_file, 'w') as f:
+ tree.write(f, pretty_print=True, xml_declaration=True)
+ ssh.execute('virsh define %s' % xml_file)
+ ssh.execute('rm -fr %s' % temp_dir)
+ ssh.close()
diff --git a/fuel/deploy/cloud_deploy/ssh_client.py b/fuel/deploy/cloud_deploy/ssh_client.py
new file mode 100644
index 0000000..b9aad6c
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/ssh_client.py
@@ -0,0 +1,56 @@
+import paramiko
+from cloud import common
+
+TIMEOUT = 600
+LOG = common.LOG
+
+class SSHClient(object):
+
+ def __init__(self, host, username, password):
+ self.host = host
+ self.username = username
+ self.password = password
+ self.client = None
+
+ def open(self, timeout=TIMEOUT):
+ self.client = paramiko.SSHClient()
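+ # Automatically accept unknown host keys instead of rejecting the
+ # connection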
+ self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.client.connect(self.host, username=self.username,
+ password=self.password, timeout=timeout)
+
+ def close(self):
+ if self.client is not None:
+ self.client.close()
+ self.client = None
+
+ def execute(self, command, sudo=False, timeout=TIMEOUT):
+ if sudo and self.username != 'root':
+ command = "sudo -S -p '' %s" % command
+ stdin, stdout, stderr = self.client.exec_command(command,
+ timeout=timeout)
+ if sudo and self.username != 'root':
+ # 'sudo -S' reads the password from stdin
+ stdin.write(self.password + '\n')
+ stdin.flush()
+ return (''.join(stderr.readlines()) +
+ ''.join(stdout.readlines()))
+
+ def run(self, command):
+ transport = self.client.get_transport()
+ transport.set_keepalive(1)
+ chan = transport.open_session()
+ chan.exec_command(command)
+
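+ # Stream stdout and stderr to the console until the command exits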
+ while not chan.exit_status_ready():
+ if chan.recv_ready():
+ data = chan.recv(1024)
+ while data:
+ print data
+ data = chan.recv(1024)
+
+ if chan.recv_stderr_ready():
+ error_buff = chan.recv_stderr(1024)
+ while error_buff:
+ print error_buff
+ error_buff = chan.recv_stderr(1024)
+ exit_status = chan.recv_exit_status()
+ LOG.debug('Exit status %s' % exit_status) \ No newline at end of file
diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py
deleted file mode 100644
index cd5085c..0000000
--- a/fuel/deploy/common.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import subprocess
-import sys
-
-
-N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
- 'roles': 6, 'pending_roles': 7, 'online': 8}
-E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4,
- 'changes': 5, 'pending_release_id': 6}
-R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4}
-RO = {'name': 0, 'conflicts': 1}
-
-def exec_cmd(cmd):
- process = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- shell=True)
- return process.communicate()[0]
-
-def parse(printout):
- parsed_list = []
- lines = printout.splitlines()
- for l in lines[2:]:
- parsed = [e.strip() for e in l.split('|')]
- parsed_list.append(parsed)
- return parsed_list
-
-def err(error_message):
- sys.stderr.write(error_message)
- sys.exit(1)
diff --git a/fuel/deploy/configure_environment.py b/fuel/deploy/configure_environment.py
deleted file mode 100644
index 9aca904..0000000
--- a/fuel/deploy/configure_environment.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import common
-import os
-import shutil
-import yaml
-
-
-from configure_settings import ConfigureSettings
-from configure_network import ConfigureNetwork
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-
-class ConfigureEnvironment(object):
-
- def __init__(self, dea, yaml_config_dir):
- self.env_id = None
- self.dea = dea
- self.yaml_config_dir = yaml_config_dir
- self.env_name = dea.get_environment_name()
-
- def env_exists(self, env_name):
- env_list = parse(exec_cmd('fuel env --list'))
- for env in env_list:
- if env[E['name']] == env_name and env[E['status']] == 'new':
- return True
- return False
-
- def get_env_id(self, env_name):
- env_list = parse(exec_cmd('fuel env --list'))
- for env in env_list:
- if env[E['name']] == env_name:
- return env[E['id']]
-
- def configure_environment(self, dea):
- exec_cmd('fuel env -c --name %s --release %s --mode ha --net neutron '
- '--nst vlan' % (self.env_name,
- self.supported_release[R['id']]))
-
- self.env_id = self.get_env_id(self.env_name)
- if not self.env_exists(self.env_name):
- err("Failed to create environment %s" % self.env_name)
-
- self.config_settings()
- self.config_network()
-
- def config_settings(self):
- if os.path.exists(self.yaml_config_dir):
- shutil.rmtree(self.yaml_config_dir)
- os.makedirs(self.yaml_config_dir)
-
- settings = ConfigureSettings(self.yaml_config_dir, self.env_id)
- settings.config_settings()
-
-
- def config_network(self):
- network_yaml=self.yaml_config_dir + '/network_%s.yaml' % self.env_id
- os.remove(network_yaml)
-
- network = ConfigureNetwork(self.yaml_config_dir, network_yaml,
- self.env_id, self.dea)
- network.config_network()
-
-
-
-
diff --git a/fuel/deploy/configure_network.py b/fuel/deploy/configure_network.py
deleted file mode 100644
index 0b298e5..0000000
--- a/fuel/deploy/configure_network.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import common
-import os
-import yaml
-import io
-import re
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-
-P1 = re.compile('!\s.*')
-
-class ConfigureNetwork(object):
-
- def __init__(self, yaml_config_dir, network_yaml, env_id, dea):
- self.yaml_config_dir = yaml_config_dir
- self.network_yaml = network_yaml
- self.env_id = env_id
- self.dea = dea
-
- def download_settings(self):
- exec_cmd('fuel network --env %s --download --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def upload_settings(self):
- exec_cmd('fuel network --env %s --upload --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def config_network(self):
-
- self.download_settings()
-
- self.apply_network_config()
-
- self.upload_settings()
-
- self.verify()
-
- def apply_network_config(self):
-
- with io.open(self.network_yaml) as stream:
- network_config = yaml.load(stream)
- networks = network_config['networks']
-
- net = self.dea.get_networks()
- net['fuelweb_admin'] = net['management']
- if 'vlan' in net['fuelweb_admin']:
- del net['fuelweb_admin']['vlan']
- del net['management']
- net_names = [n for n in net.iterkeys()]
-
- for i in range(len(networks)):
- if networks[i]['name'] == 'management':
- networks = networks[:i] + networks[i+1:]
- network_config['networks'] = networks
- break
-
- for network in networks:
- name = network['name']
- if name in net_names:
- if ('vlan' in net[name] and net[name]['vlan'] is not None):
- network['vlan_start'] = net[name]['vlan']
- network['cidr'] = net[name]['cidr']
- network['ip_ranges'][0][0] = net[name]['start']
- network['ip_ranges'][0][1] = net[name]['end']
-
- with io.open(self.network_yaml, 'w') as stream:
- yaml.dump(network_config, stream, default_flow_style=False)
-
- def verify(self):
- ret = exec_cmd('mktemp -d')
- temp_dir = ret.splitlines()[0]
-
- exec_cmd('fuel network --env %s --download --dir %s'
- % (self.env_id, temp_dir))
-
- ret = exec_cmd('diff -C0 %s %s'
- % (self.network_yaml,
- temp_dir + '/network_%s.yaml' % self.env_id))
- diff_list = []
- for l in ret.splitlines():
- m = P1.match(l)
- if m and '_vip' not in l:
- diff_list.append(l)
- if diff_list:
- err('Uploaded network yaml rejected by Fuel\n')
- \ No newline at end of file
diff --git a/fuel/deploy/configure_settings.py b/fuel/deploy/configure_settings.py
deleted file mode 100644
index cdeea49..0000000
--- a/fuel/deploy/configure_settings.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import common
-import os
-import yaml
-import io
-import re
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-
-class ConfigureSettings(object):
-
- def __init__(self, yaml_config_dir, env_id):
- self.yaml_config_dir = yaml_config_dir
- self.env_id = env_id
-
- def download_settings(self):
- exec_cmd('fuel --env %s settings --download' % self.env_id)
-
- def upload_settings(self):
- exec_cmd('fuel --env %s settings --upload' % self.env_id)
-
-
- def config_settings(self):
- self.download_settings()
- self.modify_settings()
- self.upload_settings()
-
- # Fix console speed
- def fix_console_speed(data):
- # First remove all console= from the kernel cmdline
- cmdline = data["editable"]["kernel_params"]["kernel"]["value"]
- pat = re.compile(r"console=[\w,]+\s+")
- repl = 1
- while repl != 0:
- cmdline, repl = pat.subn("", cmdline)
-
- # Then add the console info we want
- cmdline = re.sub(r"^", "console=tty0 console=ttyS0,115200 ", cmdline)
- data["editable"]["kernel_params"]["kernel"]["value"] = cmdline
-
- # Initialize kernel audit
- def initialize_kernel_audit(data):
- cmdline = data["editable"]["kernel_params"]["kernel"]["value"]
- cmdline = "audit=1 " + cmdline
- data["editable"]["kernel_params"]["kernel"]["value"] = cmdline
-
- # Add crashkernel parameter to boot parameters. W/o this we can't
- # make crash dumps after initial deploy. Standard grub setup will add
- # crashkernel= options - with bad values but that is another issue - but
- # that only enables crash dumps after first reboot
- def add_crashkernel_support(data):
- cmdline = data["editable"]["kernel_params"]["kernel"]["value"]
- cmdline += " crashkernel=256M"
- data["editable"]["kernel_params"]["kernel"]["value"] = cmdline
-
-
- def modify_settings(self):
-
- filename = "%s/settings_%d.yaml" % (self.yaml_config_dir, self.env_id)
- if not os.path.isfile(filename):
- err("Failed to find %s\n" % filename)
-
- with io.open(filename) as stream:
- data = yaml.load(stream)
-
- self.fix_console_speed(data)
-
- self.initialize_kernel_audit(data)
-
- self.add_crashkernel_support(data)
-
- # Make sure we have the correct libvirt type
- data["editable"]["common"]["libvirt_type"]["value"] = "kvm"
-
-
- # Save the settings into the file from which we loaded them
- with io.open(filename, "w") as stream:
- yaml.dump(data, stream, default_flow_style=False)
-
-
-
-
-
diff --git a/fuel/deploy/dea.yaml b/fuel/deploy/dea.yaml
index 420dae7..b83ddea 100644
--- a/fuel/deploy/dea.yaml
+++ b/fuel/deploy/dea.yaml
@@ -1,37 +1,947 @@
---
name: ENV-1
-server:
- type: hp
- mgmt_ip: 10.118.32.197
- username: opnfv
- password: E///@work
shelf:
- id: 1
+ type: libvirt
+ mgmt_ip: 10.20.0.1
+ username: user
+ password: systemabc
blade:
- id: 1
- role: controller
+ roles:
+ - controller
- id: 2
+ roles:
+ - controller
- id: 3
- role: controller
+ roles:
+ - controller
- id: 4
- id: 5
- id: 6
-network:
- - name: management
- cidr: 192.168.0.0/24
- start: 192.168.0.1
- end: 192.168.0.253
- - name: private
- vlan:
- cidr: 192.168.11.0/24
- start: 192.168.11.1
- end: 192.168.11.253
- - name: storage
- vlan:
- cidr: 192.168.12.0/24
- start: 192.168.12.1
- end: 192.168.12.253
- - name: public
- vlan:
+networks:
+ management_vip: 192.168.0.2
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.16.0.130
+ - 172.16.0.254
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 1000
+ - 1200
+ networks:
+ - cidr: 172.16.0.0/24
+ gateway: 172.16.0.1
+ ip_ranges:
+ - - 172.16.0.2
+ - 172.16.0.126
+ meta:
+ assign_vip: true
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 101
+ name: management
+ vlan_start: 101
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 102
+ - cidr: 10.20.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+ public_vip: 172.16.0.2
+controller:
+- action: add-br
+ name: br-eth0
+- action: add-port
+ bridge: br-eth0
+ name: eth0
+- action: add-br
+ name: br-eth1
+- action: add-port
+ bridge: br-eth1
+ name: eth1
+- action: add-br
+ name: br-eth2
+- action: add-port
+ bridge: br-eth2
+ name: eth2
+- action: add-br
+ name: br-eth3
+- action: add-port
+ bridge: br-eth3
+ name: eth3
+- action: add-br
+ name: br-ex
+- action: add-br
+ name: br-mgmt
+- action: add-br
+ name: br-storage
+- action: add-br
+ name: br-fw-admin
+- action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth3
+ - br-ex
+ trunks:
+ - 0
+- action: add-br
+ name: br-prv
+- action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+compute:
+- action: add-br
+ name: br-eth0
+- action: add-port
+ bridge: br-eth0
+ name: eth0
+- action: add-br
+ name: br-eth1
+- action: add-port
+ bridge: br-eth1
+ name: eth1
+- action: add-br
+ name: br-eth2
+- action: add-port
+ bridge: br-eth2
+ name: eth2
+- action: add-br
+ name: br-eth3
+- action: add-port
+ bridge: br-eth3
+ name: eth3
+- action: add-br
+ name: br-mgmt
+- action: add-br
+ name: br-storage
+- action: add-br
+ name: br-fw-admin
+- action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+- action: add-br
+ name: br-prv
+- action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: true
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: false
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 8.8.8.8, 8.8.4.4
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: false
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: false
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: false
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: ''
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10
...
diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py
deleted file mode 100644
index 4037c1d..0000000
--- a/fuel/deploy/deploy.py
+++ /dev/null
@@ -1,212 +0,0 @@
-import time
-import os
-import sys
-
-import common
-from dha import DeploymentHardwareAdapter
-from dea import DeploymentEnvironmentAdapter
-from configure_environment import ConfigureEnvironment
-
-
-SUPPORTED_RELEASE = 'Juno on CentOS 6.5'
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-
-class Deploy(object):
-
- def __init__(self, yaml_config_dir):
- self.supported_release = None
- self.yaml_config_dir = yaml_config_dir
-
- def get_id_list(self, list):
- return [l[0] for l in list]
-
- def cleanup_fuel_environments(self, env_list):
- WAIT_LOOP = 10
- SLEEP_TIME = 2
- id_list = self.get_id_list(env_list)
- for id in id_list:
- exec_cmd('fuel env --env %s --delete' % id)
- for i in range(WAIT_LOOP):
- if id in self.get_id_list(parse(exec_cmd('fuel env list'))):
- time.sleep(SLEEP_TIME)
- else:
- continue
-
- def cleanup_fuel_nodes(self, node_list):
- for node in node_list:
- if node[N['status']] == 'discover':
- exec_cmd('fuel node --node-id %s --delete-from-db'
- % node[N['id']])
- exec_cmd('dockerctl shell cobbler cobbler system remove '
- '--name node-%s' % node[N['id']])
-
- def check_previous_installation(self):
- env_list = parse(exec_cmd('fuel env list'))
- if env_list:
- self.cleanup_fuel_environments(env_list)
- node_list = parse(exec_cmd('fuel node list'))
- if node_list:
- self.cleanup_fuel_nodes(node_list)
-
- def check_supported_release(self):
- release_list= parse(exec_cmd('fuel release -l'))
- for release in release_list:
- if release[R['name']] == SUPPORTED_RELEASE:
- self.supported_release = release
- break
- if not self.supported_release:
- err("This Fuel doesn't contain the following "
- "release: %s\n" % SUPPORTED_RELEASE)
-
- def check_role_definitions(self):
- role_list= parse(exec_cmd('fuel role --release %s'
- % self.supported_release[R['id']]))
- roles = [role[RO['name']] for role in role_list]
- if 'compute' not in roles:
- err("Role compute does not exist in release %"
- % self.supported_release[R['name']])
- if 'controller' not in roles:
- err("Role controller does not exist in release %"
- % self.supported_release[R['name']])
-
- def check_prerequisites(self):
- self.check_supported_release()
- self.check_role_definitions()
- self.check_previous_installation()
-
- def power_off_blades(self, dha, shelf_blades_dict):
- for shelf, blade_list in shelf_blades_dict.iteritems():
- dha.power_off_blades(shelf, blade_list)
-
- def power_on_blades(self, dha, shelf_blades_dict):
- for shelf, blade_list in shelf_blades_dict.iteritems():
- dha.power_on_blades(shelf, blade_list)
-
- def set_boot_order(self, dha, shelf_blades_dict):
- for shelf, blade_list in shelf_blades_dict.iteritems():
- dha.set_boot_order_blades(shelf, blade_list)
-
- def count_discovered_nodes(self, node_list):
- discovered_nodes = 0
- for node in node_list:
- if node[N['status']] == 'discover':
- discovered_nodes += 1
- return discovered_nodes
-
- def wait_for_discovered_blades(self, no_of_blades):
- WAIT_LOOP = 10
- SLEEP_TIME = 2
- all_discovered = False
- node_list = parse(exec_cmd('fuel node list'))
- for i in range(WAIT_LOOP):
- if (self.count_discovered_nodes(node_list) < no_of_blades):
- time.sleep(SLEEP_TIME)
- node_list = parse(exec_cmd('fuel node list'))
- else:
- all_discovered = True
- break
- if not all_discovered:
- err("There are %s blades defined, but not all of "
- "them have been discovered\n" % no_of_blades)
-
- def assign_cluster_node_ids(self, dha, dea, controllers, compute_hosts):
- node_list= parse(exec_cmd('fuel node list'))
- for shelf_id in dea.get_shelf_ids():
- for blade_id in dea.get_blade_ids_per_shelf(shelf_id):
- blade_mac_list = dha.get_blade_mac_addresses(
- shelf_id, blade_id)
-
- found = False
- for node in node_list:
- if (node[N['mac']] in blade_mac_list and
- node[N['status']] == 'discover'):
- found = True
- break
- if found:
- if dea.is_controller(shelf_id, blade_id):
- controllers.append(node[N['id']])
- if dea.is_compute_host(shelf_id, blade_id):
- compute_hosts.append(node[N['id']])
- else:
- err("Could not find the Node ID for blade "
- "with MACs %s or blade is not in "
- "discover status\n" % blade_mac_list)
-
-
- def configure_environment(self, dea):
- config_env = ConfigureEnvironment(dea, self.yaml_config_dir)
-
-
-
- def provision(self):
-
-
-
- def fix_power_address(self):
-
-
-
-
- def deploy(self):
-
- if id in self.get_id_list(parse(exec_cmd('fuel env list'))):
-
- self.fix_power_address()
-
-
-
-
-def main():
-
- yaml_path = exec_cmd('pwd').strip() + '/dea.yaml'
- yaml_config_dir = '/var/lib/opnfv/pre_deploy'
-
- deploy = Deploy(yaml_config_dir)
-
- dea = DeploymentEnvironmentAdapter()
-
- if not os.path.isfile(yaml_path):
- sys.stderr.write("ERROR: File %s not found\n" % yaml_path)
- sys.exit(1)
-
- dea.parse_yaml(yaml_path)
-
- server_type, mgmt_ip, username, password = dea.get_server_info()
- shelf_blades_dict = dea.get_blade_ids_per_shelves()
-
- dha = DeploymentHardwareAdapter(server_type, mgmt_ip, username, password)
-
- deploy.check_prerequisites()
-
- deploy.power_off_blades(dha, shelf_blades_dict)
-
- deploy.set_boot_order(dha, shelf_blades_dict)
-
- deploy.power_on_blades(dha, shelf_blades_dict)
-
- macs = dha.get_blade_mac_addresses()
-
- deploy.wait_for_discovered_blades(dea.get_no_of_blades())
-
-
- controllers = []
- compute_hosts = []
- deploy.assign_cluster_node_ids(dha, dea, controllers, compute_hosts)
-
-
-
- deploy.configure_environment(dea)
-
- deploy.deploy(dea)
-
-
-
-if __name__ == '__main__':
- main() \ No newline at end of file
diff --git a/fuel/prototypes/libvirt/deploy/deploy.sh b/fuel/deploy/deploy.sh
index ba7f7cd..916125e 100755
--- a/fuel/prototypes/libvirt/deploy/deploy.sh
+++ b/fuel/deploy/deploy.sh
@@ -11,10 +11,10 @@
# Setup locations
topdir=$(cd `dirname $0`; pwd)
-exampledir=$(cd $topdir/../examples; pwd)
functions=${topdir}/functions
tmpdir=$HOME/fueltmp
deployiso=${tmpdir}/deploy.iso
+cloud_deploy=$(cd ${topdir}/cloud_deploy; pwd)
# Define common functions
. ${functions}/common.sh
@@ -41,7 +41,7 @@ fi
# Setup tmpdir
if [ -d $tmpdir ]; then
- rm -Rf $tmpdir || error_exit "Coul not remove tmpdir $tmpdir"
+ rm -Rf $tmpdir || error_exit "Could not remove tmpdir $tmpdir"
fi
mkdir $tmpdir || error_exit "Could not create tmpdir $tmpdir"
@@ -54,16 +54,16 @@ fi
# If no DEA specified, use the example one
if [ $# -eq 1 ]; then
- deafile=${exampledir}/libvirt_dea.yaml
+ deafile=${topdir}/dea.yaml
else
deafile=$(cd `dirname $2`; echo `pwd`/`basename $2`)
fi
if [ ! -f $deafile ]; then
error_exit "Could not find DEA file $deafile"
fi
+cp ${deafile} ${cloud_deploy}/
-
# Enable safety catch
echo "Enabling auto-kill if deployment exceeds $MAXDEPLOYTIME"
(sleep $MAXDEPLOYTIME; echo "Auto-kill of deploy after a timeout of $MAXDEPLOYTIME"; kill $$) &
@@ -73,11 +73,12 @@ killpid=$!
trap exit_handler exit
# Stop all VMs
-for node in controller1 controller2 controller3 compute4 compute5 fuel-master
+for node in `ls libvirt/vms`
do
virsh destroy $node >/dev/null 2>&1
done
+
# Install the Fuel master
# (Convert to functions at later stage)
echo "Patching iso file"
@@ -85,7 +86,8 @@ ${functions}/patch-iso.sh $isofile $deployiso $tmpdir || error_exit "Failed to p
# Swap isofiles from now on
isofile=$deployiso
. ${functions}/install_iso.sh
-. ${functions}/deploy_env.sh
+
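+# Run the Python-based cloud deployment (cloud_deploy.py)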
+python ${cloud_deploy}/cloud_deploy.py
echo "Waiting for five minutes for deploy to stabilize"
sleep 5m
diff --git a/fuel/deploy/deploy_fuel.sh b/fuel/deploy/deploy_fuel.sh
deleted file mode 100755
index 8cb72b7..0000000
--- a/fuel/deploy/deploy_fuel.sh
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-# Deploy in deployFuel has the "configure host-network,
-# install fuel, configure vm and start it" meaning
-set -o xtrace
-set -o errexit
-set -o nounset
-set -o pipefail
-
-if [ $# -ne 2 ]; then
- echo "Usage: $0 <iso-file> <interface>"
- exit 1
-fi
-
-readonly iso_file=$1
-readonly interface=$2
-readonly vm_name="fuel_opnfv"
-readonly ssh_fuel_vm="sshpass -p r00tme
- ssh -o UserKnownHostsFile=/dev/null
- -o StrictHostKeyChecking=no
- -q
- root@192.168.0.11"
-readonly RUN_INSTALL="${RUN_INSTALL:-false}"
-readonly DEV="${DEV:-false}"
-
-# poll is not real timeout, commands can take some undefined time to execute
-# it is a count of how many times to try while sleeping shortly
-# in between checks
-readonly poll_virtinstall=1800
-readonly poll_fuel_startup=1200
-readonly poll_deployment=2150
-readonly fuel_logfile="/var/log/puppet/bootstrap_admin_node.log"
-
-cat >$interface.xml <<EOF
-<network>
- <name>$interface</name>
- <forward dev='$interface' mode='bridge'>
- <interface dev='$interface'/>
- </forward>
-</network>
-EOF
-
-cleanup_previous_run() {
- echo "Cleaning up previous run"
- set +eu
- virsh net-destroy $interface > /dev/null 2>&1
- virsh net-undefine $interface > /dev/null 2>&1
- virsh destroy $vm_name > /dev/null 2>&1
- virsh undefine $vm_name > /dev/null 2>&1
- set -eu
-}
-
-create_disk_and_install() {
- rm -rf $vm_name.qcow2
- qemu-img create -f qcow2 -o preallocation=metadata $vm_name.qcow2 60G
- virt-install --connect=qemu:///system \
- --name=$vm_name \
- --network=network:$interface \
- --ram 2048 --vcpus=4,cores=2 --check-cpu --hvm \
- --disk path=$vm_name.qcow2,format=qcow2,device=disk,bus=virtio \
- --noautoconsole --vnc \
- --cdrom $iso_file
-}
-
-wait_for_virtinstall() {
- # Workaround for virt-install --wait which restarts vm
- # too fast too attach disk
- echo "Waiting for virt-install to finish..."
- set +eu
- stopped=false
- for i in $(seq 0 $poll_virtinstall); do
- virsh_out=`virsh list | grep "$vm_name"`
- if [ -z "$virsh_out" ]; then
- stopped=true
- break
- fi
- sleep 2
- done
- set -eu
-}
-
-wait_for_fuel_startup() {
- echo "Wait for fuel to start up..."
- for i in $(seq 0 $poll_fuel_startup); do
- sleep 2 && echo -n "$i "
- $ssh_fuel_vm grep complete $fuel_logfile &&
- echo "Fuel bootstrap is done, deployment should have started now" &&
- return 0
- done
- return 1
-}
-
-
-cleanup_previous_run
-virsh net-define $interface.xml
-virsh net-start $interface
-create_disk_and_install
-wait_for_virtinstall
-
-echo "Starting $vm_name after installation in 6s..." && sleep 6s
-set +eu
-
-virsh start $vm_name
-if ! wait_for_fuel_startup; then
- echo "Fuel failed to start up"
- exit 1
-fi
diff --git a/fuel/prototypes/libvirt/deploy/functions/common.sh b/fuel/deploy/functions/common.sh
index f6cceb4..f6cceb4 100755
--- a/fuel/prototypes/libvirt/deploy/functions/common.sh
+++ b/fuel/deploy/functions/common.sh
diff --git a/fuel/prototypes/libvirt/deploy/functions/install_iso.sh b/fuel/deploy/functions/install_iso.sh
index 0a92cd5..0a92cd5 100755
--- a/fuel/prototypes/libvirt/deploy/functions/install_iso.sh
+++ b/fuel/deploy/functions/install_iso.sh
diff --git a/fuel/prototypes/libvirt/deploy/functions/isolinux.cfg.patch b/fuel/deploy/functions/isolinux.cfg.patch
index 298a057..298a057 100644
--- a/fuel/prototypes/libvirt/deploy/functions/isolinux.cfg.patch
+++ b/fuel/deploy/functions/isolinux.cfg.patch
diff --git a/fuel/prototypes/libvirt/deploy/functions/ks.cfg.patch b/fuel/deploy/functions/ks.cfg.patch
index 1896957..1896957 100644
--- a/fuel/prototypes/libvirt/deploy/functions/ks.cfg.patch
+++ b/fuel/deploy/functions/ks.cfg.patch
diff --git a/fuel/prototypes/libvirt/deploy/functions/patch-iso.sh b/fuel/deploy/functions/patch-iso.sh
index 782737e..782737e 100755
--- a/fuel/prototypes/libvirt/deploy/functions/patch-iso.sh
+++ b/fuel/deploy/functions/patch-iso.sh
diff --git a/fuel/deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/hardware_adapters/hp/hp_adapter.py
deleted file mode 100644
index 7ce0dc9..0000000
--- a/fuel/deploy/hardware_adapters/hp/hp_adapter.py
+++ /dev/null
@@ -1,411 +0,0 @@
-import re
-import time
-from netaddr import EUI, mac_unix
-import logging
-
-from run_oa_command import RunOACommand
-
-
-LOG = logging.getLogger(__name__)
-out_hdlr = logging.FileHandler(__file__.split('.')[0] + '.log', mode='w')
-out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-LOG.addHandler(out_hdlr)
-LOG.setLevel(logging.DEBUG)
-
-class HpAdapter(object):
-
- # Exception thrown at any kind of failure to get the requested
- # information.
- class NoInfoFoundError(Exception):
- pass
-
- # Totally failed to connect so a re-try with other HW should
- # be done. This exception should never escape this class.
- class InternalConnectError(Exception):
- pass
-
- # Format MAC so leading zeroes are displayed
- class mac_dhcp(mac_unix):
- word_fmt = "%.2x"
-
- def __init__(self, mgmt_ip, username, password):
- self.mgmt_ip = mgmt_ip
- self.username = username
- self.password = password
- self.oa_error_message = ''
-
- def get_blade_mac_addresses(self, shelf, blade):
-
- LOG.debug("Entering: get_mac_addr_hp(%d,%d)" % (shelf, blade))
- self.oa_error_message = ''
- oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
- LOG.debug("Connect to active OA for shelf %d" % shelf)
- try:
- res = oa.connect_to_active()
- except:
- raise self.InternalConnectError(oa.error_message)
- if res is None:
- raise self.InternalConnectError(oa.error_message)
- if not oa.connected():
- raise self.NoInfoFoundError(oa.error_message)
-
- cmd = ("show server info " + str(blade))
-
- LOG.debug("Send command to OA: %s" % cmd)
- try:
- serverinfo = oa.send_command(cmd)
- except:
- raise self.NoInfoFoundError(oa.error_message)
- finally:
- oa.close()
-
- (left, right) = self.find_mac(serverinfo, shelf, blade)
-
- left = EUI(left, dialect=self.mac_dhcp)
- right = EUI(right, dialect=self.mac_dhcp)
- return [str(left), str(right)]
-
- def get_blade_hardware_info(self, shelf, blade=None):
-
- if blade:
- LOG.debug("Entering: get_hp_info(%d,%d)" % (shelf, blade))
- else:
- LOG.debug("Entering: get_hp_info(%d)" % shelf)
-
- self.oa_error_message = ''
- oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
- LOG.debug("Connect to active OA for shelf %d" % shelf)
-
- try:
- res = oa.connect_to_active()
- except:
- self.oa_error_message = oa.error_message
- return None
- if res is None:
- self.oa_error_message = oa.error_message
- return None
- if not oa.connected():
- self.oa_error_message = oa.error_message
- return None
-
- # If no blade specified we're done we know this is an HP at this point
- if not blade:
- oa.close()
- return "HP"
-
- check = "show server info %d" % blade
- LOG.debug("Send command to OA: %s" % check)
- output = oa.send_command("%s" % check)
- oa.close()
-
- match = r"Product Name:\s+(.+)\Z"
- if re.search(match, str(output[:])) is None:
- self.oa_error_message = ("Blade %d in shelf %d does not exist\n"
- % (blade, shelf))
- return None
-
- for line in output:
- seobj = re.search(match, line)
- if seobj:
- return "HP %s" % seobj.group(1)
- return False
-
- def power_off_blades(self, shelf, blade_list):
- return self.set_state(shelf, 'locked', blade_list=blade_list)
-
- def power_on_blades(self, shelf, blade_list):
- return self.set_state(shelf, 'unlocked', blade_list=blade_list)
-
- def power_off_blade(self, shelf, blade):
- return self.set_state(shelf, 'locked', one_blade=blade)
-
- def power_on_blade(self, shelf, blade):
- return self.set_state(shelf, 'unlocked', one_blade=blade)
-
- def set_boot_order_blade(self, shelf, blade):
- return self.set_boot_order(shelf, one_blade=blade)
-
- def set_boot_order_blades(self, shelf, blade_list):
- return self.set_boot_order(shelf, blade_list=blade_list)
-
-
-
- # Search HP's OA server info for MAC for left and right control
- def find_mac(self, serverinfo, shelf, blade):
- left = False
- right = False
- for line in serverinfo:
- if ("No Server Blade Installed" in line or
- "Invalid Arguments" in line):
- raise self.NoInfoFoundError("Blade %d in shelf %d "
- "does not exist." % (blade, shelf))
- seobj = re.search(r"LOM1:1-a\s+([0-9A-F:]+)", line, re.I)
- if seobj:
- left = seobj.group(1)
- else:
- seobj = re.search(r"LOM1:2-a\s+([0-9A-F:]+)", line, re.I)
- if seobj:
- right = seobj.group(1)
- if left and right:
- return left, right
- raise self.NoInfoFoundError("Could not find MAC for blade %d "
- "in shelf %d." % (blade, shelf))
-
- # Do power on or off on all configured blades in shelf
- # Return None to indicate that no connection do OA succeeded,
- # Return False to indicate some connection to OA succeeded,
- # or config error
- # Return True to indicate that power state succesfully updated
- # state: locked, unlocked
- def set_state(self, shelf, state, one_blade=None, blade_list=None):
-
- if state not in ['locked', 'unlocked']:
- return None
-
- if one_blade:
- LOG.debug("Entering: set_state_hp(%d,%s,%d)" %
- (shelf, state, one_blade))
- else:
- LOG.debug("Entering: set_state_hp(%d,%s)" % (shelf, state))
-
- self.oa_error_message = ''
-
- oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
- LOG.debug("Connect to active OA for shelf %d" % shelf)
-
- try:
- res = oa.connect_to_active()
- except:
- self.oa_error_message = oa.error_message
- return None
- if res is None:
- self.oa_error_message = oa.error_message
- return None
- if not oa.connected():
- self.oa_error_message = oa.error_message
- return False
-
- if one_blade:
- blades = [one_blade]
- else:
- blades = sorted(blade_list)
-
- LOG.debug("Check if blades are present")
-
- check = "show server list"
-
- LOG.debug("Send command to OA: %s" % check)
- output = oa.send_command(check)
- first = True
- bladelist = ''
- for blade in blades:
- prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
- re.MULTILINE)
- if prog.search(str(output[:])) is not None:
- oa.close()
- self.oa_error_message = ("Blade %d in shelf %d "
- % (blade, shelf))
- if one_blade:
- self.oa_error_message += ("does not exist.\n"
- "Set state %s not performed.\n"
- % state)
- else:
- self.oa_error_message += (
- "specified but does not exist.\nSet "
- "state %s not performed on shelf %d\n"
- % (state, shelf))
- return False
- if not first:
- bladelist += ","
- else:
- first = False
- bladelist += str(blade)
-
- if blade_list:
- LOG.debug("All blades present")
-
- # Use leading upper case on On/Off so it can be reused in match
- extra = ""
- if state == "locked":
- powerstate = "Off"
- extra = "force"
- else:
- powerstate = "On"
-
- cmd = "power%s server %s" % (powerstate, bladelist)
-
- if extra != "":
- cmd += " %s" % extra
-
- LOG.debug("Send command to OA: %s" % cmd)
-
- try:
- oa.send_command(cmd)
- except:
- self.oa_error_message = oa.error_message
- oa.close()
- return False
-
- # Check that all blades reach the state which can take some time,
- # so re-try a couple of times
- LOG.debug("Check if state %s successfully set" % state)
- recheck = 2
- while True:
- LOG.debug("Send command to OA: %s" % check)
- try:
- output = oa.send_command(check)
- except:
- self.oa_error_message = oa.error_message
- oa.close()
- return False
- for blade in blades:
- match = (r"\s+" + str(blade) +
- r"\s+\w+\s+\w+.\w+.\w+.\w+\s+\w+\s+%s" %
- powerstate)
- prog = re.compile(match, re.MULTILINE)
- if prog.search(str(output[:])) is None:
- recheck -= 1
- if recheck >= 0:
- # Re-try
- time.sleep(3)
- break
- oa.close()
- self.oa_error_message = (
- "Could not set state %s on blade %d in shelf %d\n"
- % (state, one_blade, shelf))
- for line in output:
- self.oa_error_message += line
- return False
- else:
- # state reached for all blades, exit the infinite loop
- break
-
- if one_blade:
- LOG.debug("State %s successfully set on blade %d in shelf %d"
- % (state, one_blade, shelf))
- else:
- LOG.debug("State %s successfully set on blades %s in shelf %d"
- % (state, blade_list, shelf))
- oa.close()
- return True
-
- # Change boot order on all blades in shelf
- # Return None to indicate that no connection do OA succeeded,
- # Return False to indicate some connection to OA succeeded,
- # or config error,
- # Return True to indicate that boot order succesfully changed
- def set_boot_order(self, shelf, one_blade=None, blade_list=None):
-
- if one_blade:
- LOG.debug("Entering: set_bootorder_hp(%d,%d)" % (shelf, one_blade))
- else:
- LOG.debug("Entering: set_bootorder_hp(%d)" % shelf)
-
- self.oa_error_message = ''
-
- oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
- LOG.debug("Connect to active OA for shelf %d" % shelf)
-
- try:
- res = oa.connect_to_active()
- except:
- self.oa_error_message = oa.error_message
- return None
- if res is None:
- self.oa_error_message = oa.error_message
- return None
- if not oa.connected():
- self.oa_error_message = oa.error_message
- return False
-
- if one_blade:
- blades = [one_blade]
- else:
- blades = sorted(blade_list)
-
- LOG.debug("Check if blades are present")
-
- check = "show server list"
-
- LOG.debug("Send command to OA: %s" % check)
-
- output = oa.send_command(check)
- first = True
- bladelist = ''
- for blade in blades:
- prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
- re.MULTILINE)
- if prog.search(str(output[:])) is not None:
- oa.close()
- self.oa_error_message = ("Blade %d in shelf %d "
- % (blade, shelf))
- if one_blade:
- self.oa_error_message += (
- "does not exist.\nChange boot order not performed.\n")
- else:
- self.oa_error_message += (
- "specified but does not exist.\n"
- "Change boot order not performed on shelf %d\n"
- % shelf)
- return False
- if not first:
- bladelist += ','
- else:
- first = False
- bladelist += str(blade)
-
- if blade_list:
- LOG.debug("All blades present")
-
- # Boot origins are pushed so first set boot from hard disk, then PXE
- # NB! If we want to support boot from SD we must add USB to the "stack"
- cmd1 = "set server boot first hdd %s" % bladelist
- cmd2 = "set server boot first pxe %s" % bladelist
- for cmd in [cmd1, cmd2]:
-
- LOG.debug("Send command to OA: %s" % cmd)
- try:
- output = oa.send_command(cmd)
- except:
- self.oa_error_message = oa.error_message
- for line in output:
- self.oa_error_message += line
- oa.close()
- return False
-
- # Check that all blades got the correct boot order
- # Needs updating if USB is added
- LOG.debug("Check if boot order successfully set")
- match = (r"^.*Boot Order\):\',\s*\'(\\t)+PXE NIC 1\',\s*\'(\\t)"
- r"+Hard Drive")
- prog = re.compile(match)
- for blade in blades:
-
- check = "show server boot %d" % blade
-
- LOG.debug("Send command to OA: %s" % check)
- try:
- output = oa.send_command(check)
- except:
- self.oa_error_message = oa.error_message
- oa.close()
- return False
- if prog.search(str(output[:])) is None:
- oa.close()
- self.oa_error_message = ("Failed to set boot order on blade "
- "%d in shelf %d\n" % (blade, shelf))
- for line in output:
- self.oa_error_message += line
- return False
- LOG.debug("Boot order successfully set on blade %d in shelf %d"
- % (blade, shelf))
-
- if blade_list:
- LOG.debug("Boot order successfully set on all configured blades "
- "in shelf %d" % (shelf))
- oa.close()
- return True
diff --git a/fuel/deploy/hardware_adapters/hp/run_oa_command.py b/fuel/deploy/hardware_adapters/hp/run_oa_command.py
deleted file mode 100644
index 32135c3..0000000
--- a/fuel/deploy/hardware_adapters/hp/run_oa_command.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import socket
-import paramiko
-import logging
-
-LOG = logging.getLogger(__name__)
-out_hdlr = logging.FileHandler(__file__.split('.')[0] + '.log', mode='w')
-out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-LOG.addHandler(out_hdlr)
-LOG.setLevel(logging.DEBUG)
-
-class RunOACommand:
-
- def __init__(self, mgmt_ip, username, password):
- self.ssh = None
- self.mgmt_ip = mgmt_ip
- self.username = username
- self.password = password
- self.error_message = ""
-
- def connected(self):
- return self.ssh is not None
-
- def close(self):
- if self.connected():
- self.ssh.close()
- self.ssh = None
- self.error_message = ""
-
- def connect(self):
- LOG.info("Trying to connect to OA at %s" % self.mgmt_ip)
- try:
- self.ssh.connect(self.mgmt_ip,
- username=self.username,
- password=self.password,
- look_for_keys=False,
- allow_agent=False)
- return True
- except socket.error, (err, message):
- self.error_message += ("Can not talk to OA %s: %s\n" %
- (self.mgmt_ip, message))
- except Exception as e:
- self.error_message += ("Can not talk to OA %s: %s\n" %
- (self.mgmt_ip, e.args))
- LOG.error("Failed to connect to OA at %s" % self.mgmt_ip)
- return False
-
- # Return None if this most likely is not an OA
- # False if we failed to connect to an active OA
- # True if connected
- def connect_to_active(self):
- self.error_message = "OA connect failed with these errors:\n"
-
- self.ssh = paramiko.SSHClient()
- self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
- initial_mgmt_ip = self.mgmt_ip
- if not self.connect(self.mgmt_ip, self.username, self.password):
- octets = self.mgmt_ip.split(".")
- self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
- octets[1],
- octets[2],
- str(int(octets[3]) + 1))
- if not self.connect(self.mgmt_ip, self.username, self.password):
- self.ssh = None
- LOG.error("Failed to connect to OA at %s (and %s)" %
- (initial_mgmt_ip, self.mgmt_ip))
- return None
-
- output = self.send_command("show oa status")
- for line in output:
- if "Standby" in line:
- self.ssh.close()
- self.error_message += (
- "%s is the standby OA, trying next OA\n" % self.mgmt_ip)
- LOG.info("%s is the standby OA" % self.mgmt_ip)
- if self.mgmt_ip != initial_mgmt_ip:
- self.error_message += (
- "Can only talk to OA %s which is the standby OA\n" %
- self.mgmt_ip)
- self.ssh = None
- return False
- else:
- octets = self.mgmt_ip.split(".")
- self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
- octets[1],
- octets[2],
- str(int(octets[3]) + 1))
- if not self.connect(self.mgmt_ip, self.username,
- self.password):
- self.ssh = None
- return False
- LOG.info("Connected to active OA at %s" % self.mgmt_ip)
- self.error_message = ""
- return True
-
- def send_command(self, cmd):
- if not self.connected():
- self.error_message = (
- "Not connected, cannot send command %s\n" % (cmd))
- raise
-
- LOG.info('Sending "%s" to %s' % (cmd, self.mgmt_ip))
- stdin, stdout, stderr = self.ssh.exec_command(cmd)
- output = []
- for line in stdout.read().splitlines():
- if line != '':
- output.append(line)
- return output
-
- def __exit__(self, type, value, traceback):
- if self.connected():
- self.close()
- self.ssh = None \ No newline at end of file
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel1 b/fuel/deploy/libvirt/networks/fuel1
index 7b2b154..7b2b154 100644
--- a/fuel/prototypes/libvirt/examples/networks/fuel1
+++ b/fuel/deploy/libvirt/networks/fuel1
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel2 b/fuel/deploy/libvirt/networks/fuel2
index 615c920..615c920 100644
--- a/fuel/prototypes/libvirt/examples/networks/fuel2
+++ b/fuel/deploy/libvirt/networks/fuel2
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel3 b/fuel/deploy/libvirt/networks/fuel3
index 2383e6c..2383e6c 100644
--- a/fuel/prototypes/libvirt/examples/networks/fuel3
+++ b/fuel/deploy/libvirt/networks/fuel3
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel4 b/fuel/deploy/libvirt/networks/fuel4
index 5b69f91..5b69f91 100644
--- a/fuel/prototypes/libvirt/examples/networks/fuel4
+++ b/fuel/deploy/libvirt/networks/fuel4
diff --git a/fuel/prototypes/libvirt/examples/vms/fuel-master b/fuel/deploy/libvirt/vms/fuel-master
index 1b2d86f..1b2d86f 100644
--- a/fuel/prototypes/libvirt/examples/vms/fuel-master
+++ b/fuel/deploy/libvirt/vms/fuel-master
diff --git a/fuel/prototypes/libvirt/examples/vms/controller2 b/fuel/deploy/libvirt/vms/s1_b1
index 63ad86a..a879163 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller2
+++ b/fuel/deploy/libvirt/vms/s1_b1
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>controller2</name>
+ <name>s1_b1</name>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/prototypes/libvirt/examples/vms/controller3 b/fuel/deploy/libvirt/vms/s1_b2
index 7c64a9d..27eebcf 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller3
+++ b/fuel/deploy/libvirt/vms/s1_b2
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>controller3</name>
+ <name>s1_b2</name>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/libvirt/vms/s1_b3 b/fuel/deploy/libvirt/vms/s1_b3
new file mode 100644
index 0000000..37a4d2f
--- /dev/null
+++ b/fuel/deploy/libvirt/vms/s1_b3
@@ -0,0 +1,100 @@
+<domain type='kvm'>
+ <name>s1_b3</name>
+ <memory unit='KiB'>2097152</memory>
+ <currentMemory unit='KiB'>2097152</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-1.0'>hvm</type>
+ <boot dev='network'/>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='erms'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='smep'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='fsgsbase'/>
+ <feature policy='require' name='f16c'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='rdrand'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='disk.raw'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </disk>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='fuel1'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel2'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel3'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel4'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
+ <sound model='ich6'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
+
diff --git a/fuel/deploy/libvirt/vms/s1_b4 b/fuel/deploy/libvirt/vms/s1_b4
new file mode 100644
index 0000000..97384ba
--- /dev/null
+++ b/fuel/deploy/libvirt/vms/s1_b4
@@ -0,0 +1,101 @@
+<domain type='kvm'>
+ <name>s1_b4</name>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-1.0'>hvm</type>
+ <boot dev='network'/>
+ <boot dev='hd'/>
+ <bootmenu enable='yes'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='erms'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='smep'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='fsgsbase'/>
+ <feature policy='require' name='f16c'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='rdrand'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='disk.raw'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </disk>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='fuel1'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel2'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel3'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel4'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
+ <sound model='ich6'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
+
diff --git a/fuel/deploy/libvirt/vms/s1_b5 b/fuel/deploy/libvirt/vms/s1_b5
new file mode 100644
index 0000000..97218c3
--- /dev/null
+++ b/fuel/deploy/libvirt/vms/s1_b5
@@ -0,0 +1,100 @@
+<domain type='kvm'>
+ <name>s1_b5</name>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-1.0'>hvm</type>
+ <boot dev='network'/>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='erms'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='smep'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='fsgsbase'/>
+ <feature policy='require' name='f16c'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='rdrand'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='disk.raw'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </disk>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='fuel1'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel2'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel3'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel4'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
+ <sound model='ich6'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
+
diff --git a/fuel/deploy/libvirt/vms/s1_b6 b/fuel/deploy/libvirt/vms/s1_b6
new file mode 100644
index 0000000..0cd3028
--- /dev/null
+++ b/fuel/deploy/libvirt/vms/s1_b6
@@ -0,0 +1,100 @@
+<domain type='kvm'>
+ <name>s1_b6</name>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-1.0'>hvm</type>
+ <boot dev='network'/>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='erms'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='smep'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='fsgsbase'/>
+ <feature policy='require' name='f16c'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='rdrand'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='disk.raw'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </disk>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='fuel1'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel2'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel3'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='network'>
+ <source network='fuel4'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
+ <sound model='ich6'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </memballoon>
+ </devices>
+</domain>
+
diff --git a/fuel/prototypes/libvirt/setup_vms/apply_setup.sh b/fuel/deploy/setup_vms/apply_setup.sh
index 1598d66..b38cf5d 100755
--- a/fuel/prototypes/libvirt/setup_vms/apply_setup.sh
+++ b/fuel/deploy/setup_vms/apply_setup.sh
@@ -14,8 +14,8 @@ error_exit () {
exit 1
}
-netdir='../examples/networks'
-vmdir='../examples/vms'
+netdir='../libvirt/networks'
+vmdir='../libvirt/vms'
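+
+# The definitions in $netdir and $vmdir are plain libvirt XML. As an illustration
+# (not a required manual step) they could also be applied by hand, e.g.:
+#   virsh net-define ../libvirt/networks/fuel1 && virsh net-start fuel1
+#   virsh define ../libvirt/vms/s1_b1
+# This script is assumed to automate the equivalent steps for the whole setup.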
tmpfile=/tmp/foo
if [ ! -d $netdir ]; then
diff --git a/fuel/docs/src/tmp/BUILD/README.examples b/fuel/deploy/setup_vms/setup-vm-host.sh
index 4629763..fd469e6 100644..100755
--- a/fuel/docs/src/tmp/BUILD/README.examples
+++ b/fuel/deploy/setup_vms/setup-vm-host.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
# stefan.k.berg@ericsson.com
@@ -8,3 +9,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+# Tools for installation on the libvirt server/base host
+#
+apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
+ sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
+ python-paramiko python-lxml
+restart libvirt-bin
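+#
+# Optional sanity check (illustrative, not required by the deploy flow): once
+# libvirt has restarted, 'virsh list --all' and 'virsh net-list --all' should
+# run without errors for the user that will perform the deployment.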
diff --git a/fuel/docs/src/build-instructions.rst b/fuel/docs/src/build-instructions.rst
new file mode 100644
index 0000000..e923cab
--- /dev/null
+++ b/fuel/docs/src/build-instructions.rst
@@ -0,0 +1,177 @@
+:Authors: Jonas Bjurel (Ericsson)
+:Version: 0.1.0
+
+================================================================
+OPNFV Build instructions for - Fuel@OPNFV
+================================================================
+
+Abstract
+========
+
+This document describes how to build Fuel@OPNFV, the build system dependencies and the required system resources.
+
+License
+=======
+Fuel@OPNFV DOCs (c) by Jonas Bjurel (Ericsson AB)
+
+Fuel@OPNFV DOCs (c) is licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+
+**Contents**
+
+1 Version history
+
+2 Introduction
+
+3 Requirements
+
+4 Building
+
+5 Artifacts
+
+
+1 Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-04-23 | 0.1.0 | Jonas Bjurel | First draft |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+
+2 Introduction
+================
+
+This document describes the build system used to build Fuel@OPNFV, its required dependencies and the minimum requirements on the host used for the build system.
+
+The Fuel build system is designed around Docker containers so that dependencies outside of the build system can be kept to a minimum. It also shields the host from any potentially dangerous operations performed by the build system.
+
+The audience of this document is assumed to have good knowledge of network and Unix/Linux administration.
+
+3 Requirements
+================
+
+3.1 Minimum Hardware Requirements
+---------------------------------
+
+- An x86_64 host (Bare-metal or VM) with Ubuntu 14.04 LTS installed
+
+- ~30 GB of available disk space
+
+- 4 GB RAM
+
+3.2 Minimum Software Requirements
+---------------------------------
+
+The build host should run the Ubuntu 14.04 operating system.
+
+On the host, the following packages must be installed:
+
+- docker - see https://docs.docker.com/installation/ubuntulinux/ for installation notes for Ubuntu 14.04. Note: only use the Ubuntu stock distro of Docker (docker.io)
+
+- git (simply available through sudo apt-get install git)
+
+- make (simply available through sudo apt-get install make)
+
+- curl (simply available through sudo apt-get install curl)
+
+3.3 Preparations
+----------------
+
+3.3.1 Setting up the Docker build container
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+After having installed Docker, add yourself to the docker group:
+
+<usermod -a -G docker [userid]>
+
+Also make sure to define the relevant DNS servers (part of the global DNS chain)
+in your </etc/default/docker> configuration file, e.g.
+
+<DOCKER_OPTS=" --dns=8.8.8.8 --dns=8.8.4.4">
+
+Then restart docker:
+
+<sudo service docker.io restart>
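+
+Optionally, you can verify that Docker works for your user before proceeding, for example with <docker info> or <docker run hello-world> (these are standard Docker commands and not part of the OPNFV build system).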
+
+3.3.2 Setting up OPNFV Gerrit in order to be able to clone the code
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+- Start setting up OPNFV Gerrit by creating an SSH key; if you do not already have one, create one with ssh-keygen
+
+- Add your generated public key in OPNFV Gerrit <https://gerrit.opnfv.org/>
+ (this requires a Linux Foundation account; create one if you do not already have one)
+
+- Select "SSH Public Keys" to the left and then "Add Key" and paste your public key in.
+
+3.3.3 Clone the OPNFV code git repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Now it is time to clone the code repository:
+
+<git clone ssh://[Linux foundation user]@gerrit.opnfv.org:29418/genesis>
+
+Now you should have the OPNFV genesis repository with Fuel@OPNFV stored locally on your build host.
+
+4 Building
+============
+
+There are two methods available for building Fuel@OPNFV:
+
+- A low level method using Make
+
+- An abstracted method using build.sh
+
+4.1 Configure your build environment
+-------------------------------------
+
+Select the versions of the components you want to build by editing the fuel/build/config.mk file.
+Note: if you want to build with the OpenDaylight SDN controller, you need to uncomment the lines
+starting with odl-main and java-main.
+
+4.2 Low level build method using make
+--------------------------------------
+The low level method is based on Make:
+
+From the <fuel/build directory> invoke <make [target]>
+
+The following targets exist:
+
+- none/all - this will:
+
+ - If not already existing, initialize the docker build environment
+
+ - If not already done, build OpenDaylight from upstream (as defined by fuel/build/config.mk)
+
+ - If not already done, build Fuel from upstream (as defined by fuel/build/config.mk)
+
+ - Build the defined additions to fuel (as defined by the structure of this framework)
+
+ - Apply changes and patches to fuel (as defined by the structure of this framework)
+
+ - Reconstruct a fuel .iso image
+
+- clean - this will remove all artifacts from earlier builds.
+
+If the build is successful, you will find the generated ISO file in the <fuel/build/release> subdirectory!
+
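+As an example, a complete low-level build could look as follows (assuming the repository was cloned into <genesis> as described in section 3.3.3):
+
+<cd genesis/fuel/build>
+
+<make all>
+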
+4.3 Abstracted build method using build.sh
+-------------------------------------------
+The abstracted build method uses the <fuel/ci/build.sh> script, which allows you to:
+
+- Create and use a build cache - significantly speeding up the build time if upstream repositories have not changed.
+
+- Push/pull the cache and artifacts to/from an arbitrary URI (http(s):, file:, ftp:)
+
+For more info type <fuel/ci/build.sh -h>.
+
+5 Artifacts
+=============
+
+The artifacts produced are:
+
+- <OPNFV_XXXX.iso> - which represents the bootable Fuel@OPNFV image, where XXXX is replaced with the build identity provided to the build system
+
+- <OPNFV_XXXX.iso.txt> - which holds the version metadata.
+
+6 References
+=============
diff --git a/fuel/docs/src/installation-instructions.rst b/fuel/docs/src/installation-instructions.rst
index 234dfc2..05f56f3 100644
--- a/fuel/docs/src/installation-instructions.rst
+++ b/fuel/docs/src/installation-instructions.rst
@@ -1,5 +1,5 @@
:Authors: Jonas Bjurel (Ericsson AB)
-:Version: 0.0.1
+:Version: 0.0.2
================================================
OPNFV Installation instructions for - Fuel@OPNFV
@@ -14,7 +14,7 @@ License
=======
Fuel@OPNFV DOCs (c) by Jonas Bjurel (Ericsson AB)
-Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 Unported License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
**Contents**
@@ -47,6 +47,9 @@ Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 Unported L
| 2015-04-15 | 0.0.1 | Jonas Bjurel | First draft |
| | | (Ericsson AB) | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-04-23 | 0.0.2 | Jonas Bjurel | Minor changes |
+| | | (Ericsson AB) | |
++--------------------+--------------------+--------------------+--------------------+
2 Introduction
@@ -231,8 +234,8 @@ This section describes the installation of the Fuel@OPNFV installation server (F
14. Select network mode.
- - Select Neutron with VLAN segmentation
-
+ - Select Neutron with VLAN segmentation
+
** Note: This will later be overridden to VXLAN by OpenDaylight.**
15. Select Storage Backends.
@@ -311,13 +314,13 @@ This section describes the installation of the Fuel@OPNFV installation server (F
31. Assign roles.
- Check <Controller and Telemetry MongoDB>.
-
+
- Check the three servers you want to be installed as Controllers in pane <Assign Role>.
- Click <Apply Changes>.
-
+
- Check <Compute>.
-
+
- Check nodes to be installed as Compute nodes in pane Assign Role.
- Click <Apply Changes>.
@@ -327,7 +330,7 @@ This section describes the installation of the Fuel@OPNFV installation server (F
- Check Select <All> to select all nodes with Control, Telemetry, MongoDB and Compute node roles.
- Click <Configure Interfaces>
-
+
- Screen Configure interfaces on number of <number of nodes> nodes is shown.
- Assign interfaces (bonded) for mgmt-, admin-, private-, public- and storage networks
diff --git a/fuel/docs/src/release-notes.rst b/fuel/docs/src/release-notes.rst
index 467de6e..221b7f7 100644
--- a/fuel/docs/src/release-notes.rst
+++ b/fuel/docs/src/release-notes.rst
@@ -1,5 +1,5 @@
:Authors: Jonas Bjurel (Ericsson)
-:Version: 0.1
+:Version: 0.2
================================================================
OPNFV Release Note for "Arno-RC2 release candidate" - Fuel@OPNFV
@@ -14,7 +14,7 @@ License
=======
Fuel@OPNFV DOCs (c) by Jonas Bjurel (Ericsson AB)
-Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 Unported License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
**Contents**
@@ -40,7 +40,10 @@ Fuel@OPNFV DOCs are licensed under a Creative Commons Attribution 4.0 Unported L
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2015-04-16 | 0.1.0 | Jonas Bjurel | First draft |
+| 2015-04-16 | 0.1 | Jonas Bjurel | First draft |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-04-23 | 0.2 | Jonas Bjurel | Minor change |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
diff --git a/fuel/docs/src/tmp/BUILD/README.build b/fuel/docs/src/tmp/BUILD/README.build
deleted file mode 100644
index b8ce799..0000000
--- a/fuel/docs/src/tmp/BUILD/README.build
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-How to setup and use the OPNFV/BGS/FUEL build system
-====================================================
-
-1. Build system requirements
-----------------------------
-1.1 Host HW/VM requirements
----------------------------
-- An x86_64 host (real iron or VM) with Ubuntu 14.04 LTS installed
-- ~30 GB available disc
-- 4 GB RAM
-
-1.2 Host SW dependencies
-------------------------
-The host should run Ubuntu 14.04
-On the host, the following packages must be installed:
-- docker - see https://docs.docker.com/installation/ubuntulinux/ for installation notes
-for Ubuntu 14.04. Note: only use the Ubuntu stock distro of Docker (docker.io)
-- git (simply available through apt-get install git)
-- make (simply available through apt-get install make)
-- curl
-
-2 Setting up the Docker build container
-----------------------------------------
-When having installed Docker, add yourself to the docker group:
-usermod -a -G docker <userid>
-
-Also make sure to define rellevant dns servers part of the global dns chain in
-in your /etc/default/docker config file, eg.
-DOCKER_OPTS=" --dns=8.8.8.8 --dns=8.8.8.4"
-
-Then restart docker with "service docker.io restart".:
-
-3. Setting up OPNFV Gerrit in order to being able to clone the code
--------------------------------------------------------------------
-- Start by creating a SSH key, if you don't already have one, create one with ssh-keygen
-
-- Add your generated public key in OPNFV Gerrit -https://gerrit.opnfv.org/
- (this requires a linuxfoundation account, create one if you do not already have one)
-
-- Select "SSH Public Keys" to the left and then "Add Key" and paste your public key in.
-
-4. Clone the OPNFV code git repository
---------------------------------------
- So now we're coming to the fun part! Let's clone the code repository:
- git clone ssh://<Linux foundation user>@gerrit.opnfv.org:29418/genesis
-
- Change the directory to "fuel-build"
- cd fuel-build/
-
-5. Set up (build) your build environment
-----------------------------------------
-Select the versions of the components you want to build by editing the
-fuel-build/config-spec file. Note if you want to build with OpenDaylight
-SDN controller you need to uncomment the lines starting with odl-main and
-java-main
-
-6. Build your fuel .iso image
------------------------------
-That's it, time to build!
-make [all]
-
-This will:
-- If not allready existing, initialize the docker build environment
-- If not already done, build OpenDaylight from upstream (as defined by fuel-build/
- config-spec)
-- If not already done, build fuel from upstream (as defined by fuel-build/config-spec)
-- Build the defined additions to fuel (as defined by the structure of this framework)
-- Apply changes and patches to fuel (as defined by the structure of this framework)
-- Reconstruct a fuel .iso image
-
-If the build is successful, you will find the generated ISO file in the release
-subdirectory!
-
-NOTE: the build of the baseline for Fuel and odl are cached, if build results reffering
-to the same versions as specified in fuel-build/config spec has been built before, these
-packages will not be rebuilt. The cache is cleared by "make clean"
-
-7. Install your stack
----------------------
-Please see DOC/INSTALL/README
-
-NOTES and TODO:
----------------
-The build system will for now partly run as sudo
-BUT NOTE, NONE OF THE MAKEFILES OR SCRIPTS SHALL BE EXECUTED WITH
-SUDO/ROOT PRIVILEDGES!
-TODO: Change the scripts so that no root priviledges will be needed
-
diff --git a/fuel/prototypes/deploy/README.rst b/fuel/prototypes/deploy/README.rst
new file mode 100644
index 0000000..ad77583
--- /dev/null
+++ b/fuel/prototypes/deploy/README.rst
@@ -0,0 +1,21 @@
+**DEA/DHA deployment prototype**
+
+This is an evolution of the libvirt-specific deployment prototype into a generic concept that supports a hardware plug-in architecture in the deployment engine.
+
+Conceptually, the deployer consists of a number of entities:
+
+* The main deployment engine, deploy.sh. The deploy script needs three pieces of information:
+ * The ISO file to deploy
+ * The dea.yaml file describing the Fuel deployment
+ * The dha.yaml file describing the hardware configuration
+* The Deployment Hardware Adapters (one per supported hardware type). An adapter is an implementation of the DHA API for a specific kind of hardware.
+* The Deployment Hardware Adapter configuration (dha.yaml). The DHA configuration specifies the hardware configuration in terms of number of nodes and includes both general properties and adapter-specific information (such as IPMI configuration, libvirt VM names, etc.).
+* The Deployment Environment Adapter configuration (dea.yaml). The DEA configuration describes an actual Fuel deployment, complete with network settings, node roles, interface configurations and more. The node identities in the dea.yaml must line up with those in the dha.yaml.
+
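+As an example (assuming the three input files above are at hand), a deployment is started by running the deploy engine as root, for example: sudo ./deploy.sh opnfv.iso dea.yaml dha.yaml. The deploy.sh usage text also documents optional flags such as -nf, which skips the Fuel master installation.
+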
+Both the dea.yaml and the dha.yaml can be created from an existing Fuel deployment, in effect making a carbon copy of it for re-deployment. To do this, the create_templates structure is copied to the Fuel master and create_templates.sh is run there.
+
+The examples directory contains VM and network definitions for libvirt, together with a matching dea.yaml and dha.yaml. The DEA configuration was created from an opnfv-59 deployment.
+
+The details and API description for DEA and DHA can be found in the documentation directory.
+
+See the README in examples to learn how to get a libvirt Fuel deploy up and running!
diff --git a/fuel/prototypes/deploy/TODO.txt b/fuel/prototypes/deploy/TODO.txt
new file mode 100644
index 0000000..3dcdfb7
--- /dev/null
+++ b/fuel/prototypes/deploy/TODO.txt
@@ -0,0 +1,34 @@
+*** FIXMEs can automatically be extracted from the code by running
+*** list_fixmes.sh!
+
+In transplant2.sh, grafting cannot operate on the DEA node ID but
+must consider the DHA MAC address instead. It will work as long as
+the interface setup on all nodes is identical or if the powerOnStrategy
+of the dha.yaml is sequential, but it needs to be fixed down the road.
+
+Fix the NTP server pointer on computes (currently fixed, needs to be
+calculated - OPNFV clause). Alternatively, add data to feed into
+pre-deploy in the dea.yaml. Or... Add the information requested from
+the script into settings.yaml instead and make the pre-deploy script
+only "refine" these settings?
+
+Move the network transformations out from the two compute: and
+controller: clauses and add them to the individual nodes as well?
+Potentially needed if e.g. a Cinder node is a separate node type!
+
+The release is currently hardcoded to Ubuntu, make in general.
+
+Investigate how to reliably detect that the kickstart has started - is
+it possible to set the IP even when installation is made by cdrom and
+trigger on that? See FIXME in install_iso.sh.
+
+The Fuel gateway in dea_getFuelGateway is always returning the .1 of
+the Fuel IP - should be possible to use another scheme?
+
+Verify that the nodes in the DEA correspond with the nodes in the DHA.
+
+Verify that API versions in DEA, DHA and API are aligned.
+
+Fix dhaParse and dheParse so they exit gracefully when parsing of the YAML
+file fails.
+
diff --git a/fuel/prototypes/deploy/create_templates/create_templates.sh b/fuel/prototypes/deploy/create_templates/create_templates.sh
new file mode 100755
index 0000000..8f6101b
--- /dev/null
+++ b/fuel/prototypes/deploy/create_templates/create_templates.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+cleanup () {
+ if [ -n "$tmpDir" ]; then
+ rm -Rf $tmpDir
+ fi
+}
+
+trap cleanup exit
+
+error_exit () {
+ echo "Error: $@" >&2
+ exit 1
+}
+
+tmpDir=`mktemp -d /tmp/deaXXXX`
+
+export PATH=`dirname $0`:$PATH
+
+if [ $# -lt 2 ]; then
+ error_exit "`basename $0`: <deafile> <dhafile> <comment>"
+fi
+
+deafile=$1
+dhafile=$2
+shift 2
+
+if [ $# -ne 0 ]; then
+ comment="$@"
+else
+ comment=""
+fi
+
+if [ -f $deafile ]; then
+ error_exit "$deafile already exists"
+elif [ -f $dhafile ]; then
+ error_exit "$dhafile already exists"
+fi
+
+# Create headers
+
+cat >$deafile << EOF
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: `date`
+comment: $comment
+EOF
+
+cat >$dhafile << EOF
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: `date`
+comment: $comment
+
+# Adapter to use for this definition
+adapter:
+
+# Node list.
+# Mandatory properties are id and role.
+# The MAC address of the PXE boot interface for Fuel is not
+# mandatory to be defined.
+# All other properties are adapter specific.
+
+EOF
+
+if [ `fuel env | tail -n +3 | grep -v '^$' | wc -l` -ne 1 ]; then
+ error_exit "Not exactly one environment"
+fi
+envId=`fuel env | tail -n +3 | grep -v '^$' | awk '{ print $1 }'`
+
+computeId=`fuel node | grep compute | grep True | head -1 | awk '{ print $1}'`
+controllerId=`fuel node | grep controller | grep True | head -1 | awk '{ print $1}'`
+
+if [ -z "$computeId" ]; then
+ error_exit "Could not find any compute node"
+elif [ -z "$controllerId" ]; then
+ error_exit "Could not find any controller node"
+fi
+
+fuel deployment --env $envId --download --dir $tmpDir > /dev/null || \
+ error_exit "Could not get deployment info"
+fuel settings --env $envId --download --dir $tmpDir > /dev/null || \
+ error_exit "Could not get settings"
+fuel network --env $envId --download --dir $tmpDir > /dev/null || \
+ error_exit "Could not get network settings"
+
+# Create node structure for DEA mapping to the DHA
+# Note! Nodes will be renumbered to always start with id 1
+echo "nodes:" >> $deafile
+echo "nodes:" >> $dhafile
+minNode=`fuel node | tail -n +3 | sed 's/ .*//' | sort -n | head -1`
+for realNodeId in `fuel node | tail -n +3 | sed 's/ .*//' | sort -n`
+do
+ nodeId=$[realNodeId - minNode + 1]
+ role=`fuel node --node-id $realNodeId | tail -n +3 | cut -d "|" -f 7 | sed 's/ //g'` || \
+ error_exit "Could not get role for node $realNodeId"
+
+ if [ -z "$role" ]; then
+ error_exit "Node $realNodeId has no role - is this environment really deployed?"
+ fi
+
+ fuel node --node-id $realNodeId --network --download --dir $tmpDir > /dev/null || \
+ error_exit "Could not get network info for node $controllerId"
+
+ generate_node_info.py $nodeId $role $tmpDir/node_${realNodeId}/interfaces.yaml $dhafile | \
+ grep -v "^nodes:" >> $deafile || \
+ error_exit "Could not extract info for node $realNodeId"
+done
+
+cat >>$dhafile <<EOF
+# Adding the Fuel node as node id $[nodeId + 1] which may not be correct - please
+# adjust as needed.
+EOF
+generate_fuel_node_info.py $[nodeId +1] $dhafile || \
+ error_exit "Could not extract info for the Fuel node"
+
+# Environment mode
+echo "environment_mode: `fuel env | tail -n +3 | cut -d "|" -f 4 | sed 's/ //g' | sed 's/ha_compact/ha/'`" \
+ >>$deafile || error_exit "Could not get environment mode"
+
+echo "environment_name: `fuel env | tail -n +3 | cut -d "|" -f 3 | sed 's/ //g'`" \
+ >>$deafile || error_exit "Could not get environment name"
+
+reap_fuel_settings.py $deafile fuel || \
+ error_exit "Could not extract Fuel node settings"
+
+# TODO: Potentially move the network scheme into each node of the DEA nodes structure
+# TODO: instead (this may be too generic to support all node types)
+reap_network_scheme.py $tmpDir/deployment_${envId}/*controller_${controllerId}.yaml \
+ $deafile controller || error_exit "Could not extract network scheme for controller"
+
+# TODO: Potentially move the network scheme into each node of the DEA nodes structure
+# TODO: instead (this may be too generic to support all node types)
+reap_network_scheme.py $tmpDir/deployment_${envId}/compute_${computeId}.yaml $deafile \
+ compute || error_exit "Could not extract network scheme for compute"
+
+reap_opnfv_astute.py $tmpDir/deployment_${envId}/*controller_${controllerId}.yaml \
+ $tmpDir/deployment_${envId}/compute_${computeId}.yaml ${deafile} || \
+ error_exit "Could not extract opnfv info from astute"
+
+reap_network_settings.py $tmpDir/network_${envId}.yaml $deafile network || \
+ error_exit "Could not extract network settings"
+
+
+reap_settings.py $tmpDir/settings_${envId}.yaml $deafile settings || \
+ error_exit "Could not extract settings"
+
+# Last part of the DHA file
+cat >>$dhafile << EOF
+
+# Deployment power on strategy
+# all: Turn on all nodes at once. There will be no correlation
+# between the DHA and DEA node numbering. MAC addresses
+# will be used to select the node roles though.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+# node and wait for the node to be detected by Fuel. Not until
+# the node has been detected and assigned a role will the next
+# node be turned on.
+powerOnStrategy: sequence
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()" with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+
+fuelCustomInstall: false
+EOF
+
+
+echo "DEA file is available at $deafile"
+echo "DHA file is available at $dhafile (this is just a template)"
diff --git a/fuel/prototypes/deploy/create_templates/generate_fuel_node_info.py b/fuel/prototypes/deploy/create_templates/generate_fuel_node_info.py
new file mode 100755
index 0000000..845d2ab
--- /dev/null
+++ b/fuel/prototypes/deploy/create_templates/generate_fuel_node_info.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 3:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <nodeid> <dhafile>\n")
+ sys.exit(1)
+
+nodeId=int(sys.argv[1])
+dhafile=sys.argv[2]
+
+f1 = open("/etc/fuel/astute.yaml", 'r')
+doc = yaml.load(f1)
+f1.close()
+
+dhaMac = doc["ADMIN_NETWORK"]["mac"]
+
+# Write contribution to DHA file
+f2 = open(dhafile, 'a')
+f2.write("- id: " + str(nodeId) + "\n")
+f2.write(" pxeMac: " + dhaMac + "\n")
+f2.write(" isFuel: yes\n")
+f2.close()
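+
+# For reference, the appended DHA entry has the following shape (MAC value is
+# illustrative; it is taken from ADMIN_NETWORK/mac in /etc/fuel/astute.yaml):
+# - id: <nodeId>
+#  pxeMac: 52:54:00:aa:bb:cc
+#  isFuel: yes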
+
diff --git a/fuel/prototypes/deploy/create_templates/generate_node_info.py b/fuel/prototypes/deploy/create_templates/generate_node_info.py
new file mode 100755
index 0000000..e4a7dd3
--- /dev/null
+++ b/fuel/prototypes/deploy/create_templates/generate_node_info.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 5:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <nodeid> <role> <infile> <dhafile>\n")
+ sys.exit(1)
+
+infile = sys.argv[3]
+if not os.path.exists(infile):
+ sys.stderr.write("ERROR: The file "+infile+" could not be opened\n")
+ sys.exit(1)
+
+nodeId=int(sys.argv[1])
+nodeRole=sys.argv[2]
+dhafile=sys.argv[4]
+
+f1 = open(infile, 'r')
+doc = yaml.load(f1)
+f1.close()
+
+out = {}
+
+node = {}
+node["id"] = nodeId
+node["role"] = nodeRole
+node["interfaces"] = {}
+
+
+for interface in doc:
+ iface = {}
+ networks = []
+ for network in interface["assigned_networks"]:
+ networks.append(network["name"])
+ if network["name"] == "management":
+ dhaMac = interface["mac"]
+ if networks:
+ node["interfaces"][interface["name"]] = networks
+
+out = [node]
+
+sys.stdout.write(yaml.dump(out, default_flow_style=False))
+
+# Write contribution to DHA file
+f2 = open(dhafile, 'a')
+f2.write("- id: " + str(nodeId) + "\n")
+f2.write(" pxeMac: " + dhaMac + "\n")
+f2.close()
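+
+# For reference: the node info is written to stdout as YAML, roughly of the shape
+# below (role and interface/network names are illustrative):
+# - id: 1
+#   role: controller
+#   interfaces:
+#     eth0:
+#     - management
+# while the DHA file gets a matching '- id' / ' pxeMac' entry appended, where the
+# MAC is taken from the interface carrying the 'management' network.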
+
diff --git a/fuel/prototypes/libvirt/create_dea/reap_interfaces.py b/fuel/prototypes/deploy/create_templates/reap_fuel_settings.py
index 20a34f0..a8ddba9 100755
--- a/fuel/prototypes/libvirt/create_dea/reap_interfaces.py
+++ b/fuel/prototypes/deploy/create_templates/reap_fuel_settings.py
@@ -15,29 +15,31 @@ import sys
import os
if len(sys.argv) != 3:
- sys.stderr.write("Usage: "+sys.argv[0]+" <infile> <outfile>\n")
+ sys.stderr.write("Usage: "+sys.argv[0]+" <outfile> <outnamespace>\n")
sys.exit(1)
-infile = sys.argv[1]
+infile = "/etc/fuel/astute.yaml"
if not os.path.exists(infile):
sys.stderr.write("ERROR: The file "+infile+" could not be opened\n")
sys.exit(1)
-outfile = sys.argv[2]
+outfile = sys.argv[1]
+namespace = sys.argv[2]
f1 = open(infile, 'r')
doc = yaml.load(f1)
f1.close()
out = {}
-out["interfaces"] = {}
-
-for interface in doc:
- iface = {}
- networks = []
- for network in interface["assigned_networks"]:
- networks.append(network["name"])
- out["interfaces"][interface["name"]] = networks
+out[namespace] = {}
+
+# Delete unique data
+del(doc["ADMIN_NETWORK"]["mac"])
+del(doc["ADMIN_NETWORK"]["interface"])
+
+for copy in [ "ADMIN_NETWORK", "HOSTNAME", "DNS_DOMAIN", "DNS_SEARCH",
+ "DNS_UPSTREAM", "NTP1", "NTP2", "NTP3", "ADMIN_NETWORK", "FUEL_ACCESS" ]:
+ out[namespace][copy] = doc[copy]
f2 = open(outfile, 'a')
f2.write(yaml.dump(out, default_flow_style=False))
diff --git a/fuel/prototypes/libvirt/create_dea/reap_network_scheme.py b/fuel/prototypes/deploy/create_templates/reap_network_scheme.py
index 19c18bf..19c18bf 100755
--- a/fuel/prototypes/libvirt/create_dea/reap_network_scheme.py
+++ b/fuel/prototypes/deploy/create_templates/reap_network_scheme.py
diff --git a/fuel/prototypes/libvirt/create_dea/reap_network_settings.py b/fuel/prototypes/deploy/create_templates/reap_network_settings.py
index bbd1fd0..b03a063 100755
--- a/fuel/prototypes/libvirt/create_dea/reap_network_settings.py
+++ b/fuel/prototypes/deploy/create_templates/reap_network_settings.py
@@ -31,8 +31,15 @@ doc = yaml.load(f1)
f1.close()
for nw in doc["networks"]:
- del nw["id"]
- del nw["group_id"]
+ try:
+ del nw["id"]
+ except:
+ pass
+
+ try:
+ del nw["group_id"]
+ except:
+ pass
out = {}
out[namespace] = doc
diff --git a/fuel/prototypes/deploy/create_templates/reap_opnfv_astute.py b/fuel/prototypes/deploy/create_templates/reap_opnfv_astute.py
new file mode 100755
index 0000000..55fad05
--- /dev/null
+++ b/fuel/prototypes/deploy/create_templates/reap_opnfv_astute.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 4:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <controllerfile> <computefile> <outfile>\n")
+ sys.exit(1)
+
+controller = sys.argv[1]
+if not os.path.exists(controller):
+ sys.stderr.write("ERROR: The file "+controller+" could not be opened\n")
+ sys.exit(1)
+
+compute = sys.argv[2]
+if not os.path.exists(compute):
+ sys.stderr.write("ERROR: The file "+compute+" could not be opened\n")
+ sys.exit(1)
+
+outfile = sys.argv[3]
+
+f_controller = open(controller, 'r')
+doc_controller = yaml.load(f_controller)
+f_controller.close()
+
+f_compute = open(compute, 'r')
+doc_compute = yaml.load(f_compute)
+f_compute.close()
+
+out = {}
+out["opnfv"] = {}
+out["opnfv"]["controller"] = doc_controller["opnfv"]
+out["opnfv"]["compute"] = doc_compute["opnfv"]
+
+f2 = open(outfile, 'a')
+f2.write(yaml.dump(out, default_flow_style=False))
+f2.close()
+
diff --git a/fuel/prototypes/libvirt/create_dea/reap_settings.py b/fuel/prototypes/deploy/create_templates/reap_settings.py
index 22794d2..22794d2 100755
--- a/fuel/prototypes/libvirt/create_dea/reap_settings.py
+++ b/fuel/prototypes/deploy/create_templates/reap_settings.py
diff --git a/fuel/prototypes/deploy/deploy/deploy.sh b/fuel/prototypes/deploy/deploy/deploy.sh
new file mode 100755
index 0000000..50488a4
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/deploy.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Setup locations
+topdir=$(dirname $(readlink -f $BASH_SOURCE))
+exampledir=$(cd $topdir/../examples; pwd)
+functions=${topdir}/functions
+tmpdir=$HOME/fueltmp
+
+# Define common functions
+. ${functions}/common.sh
+
+exit_handler() {
+ # Remove safety catch
+ kill -9 `ps -p $killpid -o pid --no-headers` \
+ `ps --ppid $killpid -o pid --no-headers`\
+ > /dev/null 2>&1
+}
+
+usage()
+{
+ cat <<EOF
+Syntax: `basename $0` [-nf] <isofile> <deafile> <dhafile>
+Arguments
+ -nf Do not install Fuel master
+EOF
+}
+
+
+# maximum allowed deploy time (default three hours)
+MAXDEPLOYTIME=${MAXDEPLOYTIME-3h}
+
+####### MAIN ########
+
+time0=`date +%s`
+
+if [ "`whoami`" != "root" ]; then
+ error_exit "You need be root to run this script"
+fi
+
+# Set initial variables (note: 0 means true, 1 means false, as in the adapters below)
+nofuel=1
+
+
+# Check for arguments
+if [ "$1" == "-nf" ]; then
+ nofuel=0
+ shift
+fi
+
+if [ $# -ne 3 ]; then
+ usage
+ exit 1
+fi
+
+# Setup tmpdir
+if [ -d $tmpdir ]; then
+ rm -Rf $tmpdir || error_exit "Could not remove tmpdir $tmpdir"
+fi
+mkdir $tmpdir || error_exit "Could not create tmpdir $tmpdir"
+
+isofile=$(cd `dirname $1`; echo `pwd`/`basename $1`)
+deafile=$(cd `dirname $2`; echo `pwd`/`basename $2`)
+dhafile=$(cd `dirname $3`; echo `pwd`/`basename $3`)
+
+if [ ! -f $isofile ]; then
+ error_exit "Could not find ISO file $isofile"
+elif [ ! -f $deafile ]; then
+ error-exit "Could not find DEA file $deafile"
+elif [ ! -f $dhafile ]; then
+ error-exit "Could not find DHA file $dhafile"
+fi
+
+# Connect adapter
+adapter=`grep "^adapter: " $dhafile | sed 's/.*: //'`
+if [ -z "$adapter" ]; then
+ error_exit "No adapter in DHA file!"
+elif [ ! -f $topdir/dha-adapters/${adapter}.sh ]; then
+ error_exit "Could not find adapter for $adapter"
+else
+ . $topdir/dha-adapters/${adapter}.sh $dhafile
+fi
+
+# Connect DEA API
+. ${topdir}/functions/dea-api.sh $deafile
+
+# Enable safety catch
+echo "Enabling auto-kill if deployment exceeds $MAXDEPLOYTIME"
+(sleep $MAXDEPLOYTIME; echo "Auto-kill of deploy after a timeout of $MAXDEPLOYTIME"; kill $$) &
+killpid=$!
+
+# Enable exit handler
+trap exit_handler exit
+
+# Get Fuel node information
+fuelIp=`dea getFuelIp` || error_exit "Could not get Fuel IP"
+fuelNetmask=`dea getFuelNetmask` || error_exit "Could not get Fuel netmask"
+fuelGateway=`dea getFuelGateway` || error_exit "Could not get Fuel Gateway"
+fuelHostname=`dea getFuelHostname` || error_exit "Could not get Fuel hostname"
+fuelDns=`dea getFuelDns` || error_exit "Could not get Fuel DNS"
+fuelNodeId=`dha getFuelNodeId` || error_exit "Could not get fuel node id"
+
+# Stop all VMs
+for id in `dha getAllNodeIds`
+do
+ if [ $nofuel -eq 0 ]; then
+ if [ $fuelNodeId -ne $id ]; then
+ echo "Powering off id $id"
+ dha nodePowerOff $id
+ fi
+ else
+ echo "Powering off id $id"
+ dha nodePowerOff $id
+ fi
+done
+
+# Install the Fuel master
+if [ $nofuel -eq 1 ]; then
+ echo "Patching iso file"
+
+ deployiso="${tmpdir}/deploy-`basename $isofile`"
+ ${functions}/patch-iso.sh $isofile $deployiso $tmpdir \
+ $fuelIp $fuelNetmask $fuelGateway $fuelHostname $fuelDns \
+ || error_exit "Failed to patch ISO"
+
+    # Use the patched ISO from now on
+ isofile=$deployiso
+ if dha useFuelCustomInstall; then
+ echo "Custom Fuel install"
+ dha fuelCustomInstall || error_exit "Failed to run Fuel custom install"
+ else
+ echo "Ordinary Fuel install"
+ . ${functions}/install_iso.sh || error_exit "Failed to install Fuel"
+ fi
+else
+ echo "Not installing Fuel master"
+fi
+
+. ${functions}/deploy_env.sh
+
+echo "Waiting for one minute for deploy to stabilize"
+sleep 1m
+
+echo "Verifying node status after deployment"
+# Any node with non-ready status?
+ssh root@${fuelIp} fuel node 2>/dev/null | tail -n +3 | cut -d "|" -f 2 | \
+ sed 's/ //g' | grep -v ready | wc -l | grep -q "^0$"
+if [ $? -ne 0 ]; then
+ echo -e "Deploy failed to verify\n"
+ ssh root@${fuelIp} fuel node 2>/dev/null
+ error_exit "Exiting with error status"
+else
+ echo -e "Deployment verified\n"
+ ssh root@${fuelIp} fuel node 2>/dev/null
+ echo -e "\nNow running sanity and smoke health checks"
+ echo -e "\n\n"
+ ssh root@${fuelIp} fuel health --env ${envId} --check sanity,smoke \
+ --force
+ if [ $? -eq 0 ]; then
+ echo "Health checks passed!"
+ else
+ error_exit "One or several health checks failed!"
+ fi
+
+ time1=`date +%s`
+ echo "Total deployment time: $[(time1-time0)/60] minutes"
+ exit 0
+fi
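
For reference, a minimal sketch of invoking the script above; the ISO and YAML paths are placeholders, and the script itself checks that it is run as root:

# Full run, including installation of the Fuel master:
./deploy.sh /iso/opnfv.iso ~/dea.yaml ~/dha.yaml

# Re-deploy the environment against an already installed Fuel master:
./deploy.sh -nf /iso/opnfv.iso ~/dea.yaml ~/dha.yaml

# Optionally shorten the auto-kill watchdog from its default of three hours:
MAXDEPLOYTIME=2h ./deploy.sh /iso/opnfv.iso ~/dea.yaml ~/dha.yaml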
diff --git a/fuel/prototypes/deploy/deploy/dha-adapters/dhaParse.py b/fuel/prototypes/deploy/deploy/dha-adapters/dhaParse.py
new file mode 100755
index 0000000..d2712c6
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/dha-adapters/dhaParse.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+def test(arr):
+ print "Nodes"
+ nodes = doc["nodes"]
+ for node in nodes:
+ print "Node %d " % node["id"]
+ print " Mac: %s" % node["adminMac"]
+ print " Role: %s" % node["role"]
+
+def get(arg):
+ try:
+ if doc[arg[0]]:
+ print doc[arg[0]]
+ else:
+ print ""
+ except KeyError:
+ print ""
+
+def getNodes(arg):
+ for node in doc["nodes"]:
+ print node["id"]
+
+# Get property arg2 from arg1
+def getNodeProperty(arg):
+ id=arg[0]
+ key=arg[1]
+
+ for node in doc["nodes"]:
+ if node["id"] == int(id):
+ try:
+ if node[key]:
+ print node[key]
+ exit(0)
+ except:
+ print ""
+ exit(0)
+ exit(1)
+
+
+
+infile = sys.argv[1]
+
+if not os.path.exists(infile):
+ sys.stderr.write("ERROR: The file "+infile+" could not be opened\n")
+ sys.exit(1)
+
+
+f1 = open(infile, 'r')
+doc = yaml.load(f1)
+f1.close()
+
+cmd = sys.argv[2]
+args = sys.argv[3:]
+
+if cmd == "test":
+ test(args)
+elif cmd == "getNodes":
+ getNodes(args)
+elif cmd == "getNodeProperty":
+ getNodeProperty(args)
+elif cmd == "get":
+ get(args)
+else:
+ print "No such command: %s" % cmd
+ exit(1)
+
+#print "Dumping"
+#print yaml.dump(doc, default_flow_style=False)
+
+#Functions:
+
+#getIdRole
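
A minimal sketch of calling dhaParse.py by hand, assuming a DHA file at dha.yaml (path and node id are placeholders); in practice it is driven through the dha adapters:

./dhaParse.py dha.yaml getNodes                   # list all node ids
./dhaParse.py dha.yaml get adapter                # top-level property, e.g. "libvirt"
./dhaParse.py dha.yaml getNodeProperty 1 libvirtName
./dhaParse.py dha.yaml getNodeProperty 1 pxeMac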
diff --git a/fuel/prototypes/deploy/deploy/dha-adapters/libvirt.sh b/fuel/prototypes/deploy/deploy/dha-adapters/libvirt.sh
new file mode 100755
index 0000000..0e91f49
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/dha-adapters/libvirt.sh
@@ -0,0 +1,334 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+
+########################################################################
+# Internal functions BEGIN
+
+
+dha_f_err()
+{
+ local rc
+ local cmd
+
+ rc=$1
+ shift
+
+ echo "$@" >&2
+ echo "Exit with code $rc" >&2
+
+ exit $rc
+}
+
+dha_f_run()
+{
+ $@
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ dha_f_err $rc "running $@" >&2
+ exit $rc
+ fi
+}
+
+# Internal functions END
+########################################################################
+
+
+true=0
+false=1
+
+# API: Get the DHA API version supported by this adapter
+dha_getApiVersion ()
+{
+ echo "1.0"
+}
+
+# API: Get the name of this adapter
+dha_getAdapterName ()
+{
+ echo "libvirt"
+}
+
+# API: ### Node identity functions ###
+# API: Node numbering is sequential.
+
+# API: Get a list of all defined node ids, sorted in ascending order
+dha_getAllNodeIds()
+{
+ dha_f_run $DHAPARSE $DHAFILE getNodes | sort -n
+}
+
+
+# API: Get ID for Fuel node ID
+dha_getFuelNodeId()
+{
+ for node in `dha_getAllNodeIds`
+ do
+ if [ -n "`dha_f_run $DHAPARSE $DHAFILE getNodeProperty $node isFuel`" ]
+ then
+ echo $node
+ fi
+ done
+}
+
+# API: Get node property
+# API: Argument 1: node id
+# API: Argument 2: Property
+dha_getNodeProperty()
+{
+ dha_f_run $DHAPARSE $DHAFILE getNodeProperty $1 $2
+}
+
+
+# API: Get MAC address for the PXE interface of this node. If not
+# API: defined, an empty string will be returned.
+# API: Argument 1: Node id
+dha_getNodePxeMac()
+{
+ dha_getNodeProperty $1 pxeMac
+}
+
+
+### Node operation functions ###
+
+# API: Use custom installation method for Fuel master?
+# API: Returns 0 if true, 1 if false
+dha_useFuelCustomInstall()
+{
+ $DHAPARSE $DHAFILE get fuelCustomInstall | grep -qi true
+ rc=$?
+ return $rc
+}
+
+# API: Fuel custom installation method
+# API: Leaving the Fuel master powered on and booting from ISO at exit
+# API: Argument 1: Full path to ISO file to install
+dha_fuelCustomInstall()
+{
+ dha_useFuelCustomInstall || dha_f_err 1 "dha_fuelCustomInstall not supported"
+ date
+}
+
+# API: Get power on strategy from DHA
+# API: Returns one of two values:
+# API: all: Power on all nodes simultaneously
+# API: sequence: Power on node by node, wait for Fuel detection
+dha_getPowerOnStrategy()
+{
+ local strategy
+
+ strategy=`$DHAPARSE $DHAFILE get powerOnStrategy`
+
+ if [ "$strategy" == "all" ]; then
+ echo $strategy
+ elif
+ [ "$strategy" == "sequence" ]; then
+ echo $strategy
+ else
+ dha_f_err 1 "Could not parse strategy from DHA, got $strategy"
+ fi
+}
+
+
+# API: Power on node
+# API: Argument 1: node id
+dha_nodePowerOn()
+{
+ local state
+ local virtName
+
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ state=`virsh domstate $virtName`
+ if [ "$state" == "shut off" ]; then
+ dha_f_run virsh start $virtName
+ fi
+}
+
+# API: Power off node
+# API: Argument 1: node id
+dha_nodePowerOff()
+{
+ local state
+ local virtName
+
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ state=`virsh domstate $virtName`
+ if [ "$state" != "shut off" ]; then
+ dha_f_run virsh destroy $virtName
+ fi
+}
+
+# API: Reset node
+# API: Argument 1: node id
+dha_nodeReset()
+{
+ local virtName
+
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ dha_f_run virsh reset $virtName
+}
+
+# Boot order and ISO boot file
+
+# API: Is the node able to commit boot order without power toggle?
+# API: Argument 1: node id
+# API: Returns 0 if true, 1 if false
+dha_nodeCanSetBootOrderLive()
+{
+ return $false
+}
+
+# API: Set node boot order
+# API: Argument 1: node id
+# API: Argument 2: Space separated line of boot order - boot ids are "pxe", "disk" and "iso"
+dha_nodeSetBootOrder()
+{
+ local id
+ local bootline
+ local virtName
+ local order
+
+ id=$1
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ shift
+
+ for order in $@
+ do
+ if [ "$order" == "pxe" ]; then
+ bootline+="<boot dev='network'\/>\n"
+ elif [ "$order" == "disk" ]; then
+ bootline+="<boot dev='hd'/\>\n"
+ elif [ "$order" == "iso" ]; then
+ bootline+="<boot dev='cdrom'/\>\n"
+ else
+ error_exit "Unknown boot type: $order"
+ fi
+ done
+ echo $bootline
+
+ virsh dumpxml $virtName | grep -v "<boot dev.*>" | \
+ sed "/<\/os>/i\
+ ${bootline}" > $tmpdir/vm.xml || error_exit "Could not set bootorder"
+ virsh define $tmpdir/vm.xml || error_exit "Could not set bootorder"
+
+}
+
+# API: Is the node able to operate on ISO media?
+# API: Argument 1: node id
+# API: Returns 0 if true, 1 if false
+dha_nodeCanSetIso()
+{
+ return $true
+}
+
+# API: Is the node able to insert and eject ISO files without power toggle?
+# API: Argument 1: node id
+# API: Returns 0 if true, 1 if false
+dha_nodeCanHandeIsoLive()
+{
+ return $true
+}
+
+# API: Insert ISO into virtualDVD
+# API: Argument 1: node id
+# API: Argument 2: iso file
+dha_nodeInsertIso()
+{
+ local virtName
+ local isoFile
+
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ isoFile=$2
+ virsh change-media fuel-master --insert hdc $isoFile
+}
+
+# API: Eject ISO from virtual DVD
+# API: Argument 1: node id
+dha_nodeEjectIso()
+{
+ local virtName
+ local isoFile
+
+ virtName=`$DHAPARSE $DHAFILE getNodeProperty $1 libvirtName`
+ isoFile=$2
+ virsh change-media $virtName --eject hdc
+}
+
+# API: Wait until a suitable time to change the boot order to
+# API: "disk iso" when ISO has been booted. Can't be too long, nor
+# API: too short...
+# API: We should make a smart trigger for this somehow...
+dha_waitForIsoBoot()
+{
+ echo "waitForIsoBoot: No delay necessary for libvirt"
+}
+
+# API: Is the node able to reset its MBR?
+# API: Returns 0 if true, 1 if false
+dha_nodeCanZeroMBR()
+{
+ return $true
+}
+
+# API: Reset the node's MBR
+dha_nodeZeroMBR()
+{
+ local fueldisk
+ local disksize
+
+ fueldisk=`virsh dumpxml $(dha_getNodeProperty $1 libvirtName) | \
+ grep "<source file" | grep raw | sed "s/.*'\(.*\)'.*/\1/"`
+ disksize=`ls -l $fueldisk | awk '{ print $5 }'`
+ rm -f $fueldisk
+ fallocate -l $disksize $fueldisk
+}
+
+
+# API: Entry point for dha functions
+# API: Typically do not call "dha_node_zeroMBR" but "dha node_ZeroMBR"
+# API:
+# API: Before calling dha, the adapter file must have been sourced with
+# API: the DHA file name as argument
+dha()
+{
+ if [ -z "$DHAFILE" ]; then
+ error_exit "dha_setup has not been run"
+ fi
+
+
+ if type dha_$1 &>/dev/null; then
+ cmd=$1
+ shift
+ dha_$cmd $@
+ return $?
+ else
+ error_exit "No such function dha_$1 defined"
+ fi
+}
+
+if [ "$1" == "api" ]; then
+ egrep "^# API: |dha.*\(\)" $0 | sed 's/^# API: /# /' | grep -v dha_f_ | sed 's/)$/)\n/'
+else
+ dhatopdir=$(dirname $(readlink -f $BASH_SOURCE))
+ DHAPARSE="$dhatopdir/dhaParse.py"
+ DHAFILE=$1
+
+ if [ ! -f $DHAFILE ]; then
+ error_exit "No such DHA file: $DHAFILE"
+ else
+ echo "Adapter init"
+ echo "$@"
+ echo "DHAPARSE: $DHAPARSE"
+ echo "DHAFILE: $DHAFILE"
+ fi
+
+fi
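
A minimal sketch of how a deploy script drives this adapter; it assumes functions/common.sh has been sourced so error_exit exists, that tmpdir points at a writable scratch directory as in deploy.sh, and that the dha.yaml path and node id are placeholders:

. dha-adapters/libvirt.sh ~/dha.yaml

for id in `dha getAllNodeIds`; do
    echo "Node $id PXE MAC: `dha getNodePxeMac $id`"
done

dha nodeSetBootOrder 1 "pxe disk"
dha nodePowerOn 1

# The adapter can also print its own API summary:
dha-adapters/libvirt.sh api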
diff --git a/fuel/prototypes/deploy/deploy/functions/common.sh b/fuel/prototypes/deploy/deploy/functions/common.sh
new file mode 100755
index 0000000..6947d79
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/common.sh
@@ -0,0 +1,67 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Common functions
+
+error_exit () {
+ echo "Error: $@" >&2
+ exit 1
+}
+
+ssh() {
+ SSHPASS="r00tme" sshpass -e ssh -o UserKnownHostsFile=${tmpdir}/known_hosts \
+ -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@"
+}
+
+scp() {
+ SSHPASS="r00tme" sshpass -e scp -o UserKnownHostsFile=${tmpdir}/known_hosts \
+ -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@"
+}
+
+
+fuel () {
+ ssh root@`dea getFuelIp` "fuel $@"
+}
+
+
+# TODO: Move numberOfNodes into the DEA API
+numberOfNodes() {
+ fuel node | tail -n +3 | grep -v "^$" | wc -l
+}
+
+# TODO: Move numberOfNodesUp into the DEA API
+numberOfNodesUp() {
+ fuel node | tail -n +3 | grep -v "^$" | grep True | wc -l
+}
+
+# Currently not used!
+# Wait for node count to increase
+waitForNode() {
+ local cnt
+ local initCnt
+ local expectCnt
+
+ initCnt=`numberOfNodesUp`
+ expectCnt=$[initCnt+1]
+ while true
+ do
+ cnt=`numberOfNodesUp`
+ if [ $cnt -eq $expectCnt ]; then
+ break
+ elif [ $cnt -lt $initCnt ]; then
+ error_exit "Node count decreased while waiting, $initCnt -> $cnt"
+ elif [ $cnt -gt $expectCnt ]; then
+ error_exit "Node count exceeded expect count, $cnt > $expectCnt"
+ fi
+ sleep 10
+ echo -n "[${cnt}]"
+ done
+ echo "[${cnt}]"
+}
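
As a small usage sketch of the helpers above (assuming dea-api.sh has been sourced so `dea getFuelIp` resolves and the Fuel master is reachable):

fuel node                 # runs "fuel node" on the Fuel master over ssh
echo "`numberOfNodesUp` of `numberOfNodes` discovered nodes are up"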
diff --git a/fuel/prototypes/deploy/deploy/functions/dea-api.sh b/fuel/prototypes/deploy/deploy/functions/dea-api.sh
new file mode 100755
index 0000000..9401192
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/dea-api.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+
+########################################################################
+# Internal functions BEGIN
+
+
+
+dea_f_err()
+{
+ local rc
+ local cmd
+
+ rc=$1
+ shift
+
+ if [ -n "$rc" ]; then
+ echo "Error ($rc): $@" >&2
+ else
+ echo "Error: $@" >&2
+ fi
+}
+
+dea_f_run()
+{
+ $@
+ rc=$?
+ if [ $rc -ne 0 ]; then
+ dea_f_err $rc "Error running $@"
+ return $rc
+ fi
+}
+
+# Internal functions END
+########################################################################
+
+true=0
+false=1
+
+# API: Get the DEA API version supported by this adapter
+dea_getApiVersion ()
+{
+ echo "1.0"
+}
+
+
+# API: Node numbering is sequential.
+
+
+# API: Get the role for this node
+# API: Argument 1: node id
+dea_getNodeRole()
+{
+ $DEAPARSE $DEAFILE getNodeRole $@
+
+}
+
+# API: Get IP address of Fuel master
+dea_getFuelIp()
+{
+ $DEAPARSE $DEAFILE getProperty fuel ADMIN_NETWORK ipaddress
+}
+
+# API: Get netmask Fuel master
+dea_getFuelNetmask()
+{
+ $DEAPARSE $DEAFILE getProperty fuel ADMIN_NETWORK netmask
+}
+
+# API: Get gateway address of Fuel master
+# FIXME: This is currently not in the DEA, so make the gateway the ..1
+# FIXME: of the IP
+dea_getFuelGateway()
+{
+ $DEAPARSE $DEAFILE getProperty fuel ADMIN_NETWORK ipaddress | \
+ sed 's/.[0-9]*$/.1/'
+}
+
+# API: Get hostname of Fuel master
+dea_getFuelHostname()
+{
+ $DEAPARSE $DEAFILE getProperty fuel HOSTNAME
+}
+
+# API: Get DNS address of Fuel master
+dea_getFuelDns()
+{
+ $DEAPARSE $DEAFILE getProperty fuel DNS_UPSTREAM
+}
+
+# API: Convert a normal MAC to a Fuel short mac for --node-id
+dea_convertMacToShortMac()
+{
+ echo $1 | sed 's/.*..:..:..:..:\(..:..\).*/\1/'
+}
+
+
+# API: Get property from DEA file
+# API: Argument 1: search path, as e.g. "fuel ADMIN_NETWORK ipaddress"
+dea_getProperty()
+{
+ $DEAPARSE $DEAFILE getProperty $@
+}
+
+# API: Convert DHA node id to Fuel cluster node id
+# API: Look for lowest Fuel node number, this will be DHA node 1
+# API: Argument: node id
+dea_getClusterNodeId()
+{
+ local baseId
+ local inId
+ local fuelIp
+
+ inId=$1
+ fuelIp=`dea_getFuelIp`
+
+ baseId=`ssh root@${fuelIp} fuel node | tail -n +3 | awk '{ print $1 }'| sed 's/ //g' | sort -n | head -1`
+ echo "$[inId + baseId - 1]"
+}
+
+# API: Entry point for dea functions
+# API: Typically do not call "dea_node_zeroMBR" but "dea node_ZeroMBR"
+# API:
+# API: Before calling dea, the adapter file must have been sourced with
+# API: the DEA file name as argument
+dea()
+{
+ if [ -z "$DEAFILE" ]; then
+ error_exit "dea_setup has not been run"
+ fi
+
+
+ if type dea_$1 &>/dev/null; then
+ cmd=$1
+ shift
+ dea_$cmd $@
+ return $?
+ else
+ error_exit "No such function dea_$1 defined"
+ fi
+}
+
+if [ "$1" == "api" ]; then
+ egrep "^# API: |dea.*\(\)" $0 | sed 's/^# API: /# /' | grep -v dea_f_ | sed 's/)$/)\n/'
+else
+ deatopdir=$(dirname $(readlink -f $BASH_SOURCE))
+ DEAPARSE="$deatopdir/deaParse.py"
+ DEAFILE=$1
+
+ if [ ! -f $DEAFILE ]; then
+ error_exit "No such DEA file: $DEAFILE"
+ else
+ echo "Adapter init"
+ echo "$@"
+ echo "DEAPARSE: $DEAPARSE"
+ echo "DEAFILE: $DEAFILE"
+ fi
+fi
+
+
+
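
A minimal sketch of querying the DEA API from a script; functions/common.sh is assumed to be sourced for error_exit, and the dea.yaml path and node id are placeholders:

. functions/dea-api.sh ~/dea.yaml

fuelIp=`dea getFuelIp`
echo "Fuel master: $fuelIp (`dea getFuelHostname`)"
echo "Admin DHCP pool starts at: `dea getProperty fuel ADMIN_NETWORK dhcp_pool_start`"
echo "Role of node 1: `dea getNodeRole 1`"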
diff --git a/fuel/prototypes/deploy/deploy/functions/deaParse.py b/fuel/prototypes/deploy/deploy/functions/deaParse.py
new file mode 100755
index 0000000..7ca6501
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/deaParse.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+def test(arr):
+ print "Nodes"
+ nodes = doc["nodes"]
+ for node in nodes:
+ print "Node %d " % node["id"]
+ print " Mac: %s" % node["adminMac"]
+ print " Role: %s" % node["role"]
+
+def getNodeRole(arg):
+ for node in doc["nodes"]:
+ print node
+ try:
+ if node["role"] == arg[0]:
+ print doc["id"]
+ except KeyError:
+ exit(1)
+
+def getNodes(arg):
+ for node in doc["nodes"]:
+ print node["id"]
+
+
+def getProperty(arg):
+ result = doc
+ for level in arg:
+ result = result[level]
+ print result
+
+def getNodeRole(arg):
+ for node in doc["nodes"]:
+ if int(arg[0]) == node["id"]:
+ print node["role"]
+
+def getNode(arg):
+ id=arg[0]
+ key=arg[1]
+ for node in doc["nodes"]:
+ if int(node["id"]) == int(id):
+ print node[key]
+
+ # for node in doc["nodes"]:
+ # if int(node["id"]) == int(arg[0]):
+ # print node
+
+infile = sys.argv[1]
+
+if not os.path.exists(infile):
+ sys.stderr.write("ERROR: The file "+infile+" could not be opened\n")
+ sys.exit(1)
+
+
+f1 = open(infile, 'r')
+doc = yaml.load(f1)
+f1.close()
+
+cmd = sys.argv[2]
+args = sys.argv[3:]
+
+if cmd == "getProperty":
+ getProperty(args)
+elif cmd == "getNodeRole":
+ getNodeRole(args)
+elif cmd == "getNode":
+ getNode(args)
+elif cmd == "get":
+ get(args)
+else:
+ print "No such command: %s" % cmd
+ exit(1)
diff --git a/fuel/prototypes/deploy/deploy/functions/deploy_env.sh b/fuel/prototypes/deploy/deploy/functions/deploy_env.sh
new file mode 100755
index 0000000..139fcc5
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/deploy_env.sh
@@ -0,0 +1,136 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Deploy!
+scp -q $deafile root@${fuelIp}:. || error_exit "Could not copy DEA file to Fuel"
+echo "Uploading build tools to Fuel server"
+ssh root@${fuelIp} rm -rf tools || error_exit "Error cleaning old tools structure"
+scp -qrp $topdir/tools root@${fuelIp}:. || error_exit "Error copying tools"
+
+# Refuse to run if environment already present
+envcnt=`fuel env | tail -n +3 | grep -v '^$' | wc -l`
+if [ $envcnt -ne 0 ]; then
+ error_exit "Environment count is $envcnt"
+fi
+
+# Refuse to run if any nodes are up
+nodeCnt=`numberOfNodesUp`
+if [ $nodeCnt -ne 0 ]; then
+ error_exit "Nodes are up (node count: $nodeCnt)"
+fi
+
+# FIXME: Add support for CentOS creation here
+# Extract release ID for Ubuntu environment
+ubuntucnt=`fuel release | grep Ubuntu | wc -l`
+if [ $ubuntucnt -ne 1 ]; then
+ error_exit "Not exacly one Ubuntu release found"
+fi
+
+# FIXME: Make release a property in the dea.yaml and use that instead!
+ubuntuid=`fuel release | grep Ubuntu | awk '{ print $1 }'`
+
+# Create environment
+envName=`dea getProperty environment_name` || error_exit "Could not get environment name"
+envMode=`dea getProperty environment_mode` || error_exit "Could not get environment mode"
+
+fuel env create --name $envName \
+ --rel $ubuntuid \
+ --mode $envMode \
+ --network-mode neutron \
+ --net-segment-type vlan \
+ || error_exit "Error creating environment"
+
+envId=`ssh root@${fuelIp} fuel env | tail -n +3 | awk '{ print $1 }'` \
+ || error_exit "Could not get environment id"
+
+echo "Running transplant #1"
+ssh root@${fuelIp} "cd tools; ./transplant1.sh ../`basename $deafile`" \
+ || error_exit "Error running transplant sequence #1"
+
+# Start VMs
+strategy=`dha getPowerOnStrategy` || error_exit "Could not get power on strategy"
+if [ $strategy == "all" ]; then
+ echo "Starting all nodes at once"
+ poweredOn=0
+ for id in `dha getAllNodeIds`
+ do
+ if [ $id -ne $fuelNodeId ]; then
+ echo "Setting boot order pxe disk for node $id"
+ dha nodeSetBootOrder $id "pxe disk" || "Could not set boot order for node"
+ echo "Powering on node $id"
+ dha nodePowerOn $id || error_exit "Could not power on node"
+ poweredOn=$[poweredOn + 1]
+ fi
+ done
+ # Wait for all nodes to be accounted for
+ echo "Waiting for $poweredOn nodes to come up"
+ while true
+ do
+ nodesUp=`numberOfNodesUp`
+ echo -n "[${nodesUp}]"
+ if [ $nodesUp -eq $poweredOn ]; then
+ break
+ fi
+ sleep 10
+ done
+ echo "[${nodesUp}]"
+else
+ # Refuse to run if any nodes are defined
+ totalNodeCnt=`numberOfNodes`
+ if [ $totalNodeCnt -ne 0 ]; then
+ error_exit "There are already ${totalNodeCnt} defined nodes, can not run power on in sequence!"
+ fi
+ echo "Starting nodes sequentially, waiting for Fuel detection until proceeding"
+ for id in `dha getAllNodeIds`
+ do
+ if [ $id -ne $fuelNodeId ]; then
+ echo "Setting boot order pxe disk for node $id"
+ dha nodeSetBootOrder $id "pxe disk" || "Could not set boot order for node"
+ echo "Powering on node $id"
+ dha nodePowerOn $id || error_exit "Could not power on node"
+ # Wait for node count to increase
+ waitForNode
+ fi
+ done
+fi
+
+# Set roles for detected hosts
+for id in `dha getAllNodeIds`
+do
+ # If not a Fuel node
+ if [ $fuelNodeId -ne $id ]; then
+ longMac=`dha getNodePxeMac $id` || \
+ error_exit "Could not get MAC address for node $id from DHA"
+ shortMac=`dea convertMacToShortMac $longMac`
+ role="`dea getNodeRole $id`"
+ echo "Setting role $role for Fuel node $shortMac (DEA node $id)"
+ fuel node set --node-id $shortMac --role $role --env $envId \
+ || error_exit "Could not set role for $node"
+ fi
+done
+
+# Run pre-deploy with default input
+# Need to set terminal as script does "clear" and needs curses support
+ssh root@${fuelIp} "TERM=vt100 /opt/opnfv/pre-deploy.sh < /dev/null" \
+ || error_exit "Pre-deploy failed"
+
+# Inject node network config (will override pre-deploy Astute settings but we
+# want to catch pre-deploy provisioning changes)
+# TODO: There needs to be a function to adjust the NTP settings for clients
+# TODO: to that of the actual set of controllers in this deployment.
+echo "Running transplant #2"
+ssh root@${fuelIp} "cd tools; ./transplant2.sh ../`basename $deafile`" \
+ || error_exit "Error running transplant sequence #2"
+
+
+# Deploy
+echo "Deploying!"
+ssh root@${fuelIp} "fuel deploy-changes --env $envId" >/dev/null 2>&1 || error_exit "Deploy failed"
+echo "Deployment completed"
diff --git a/fuel/prototypes/deploy/deploy/functions/install_iso.sh b/fuel/prototypes/deploy/deploy/functions/install_iso.sh
new file mode 100755
index 0000000..2ec510b
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/install_iso.sh
@@ -0,0 +1,91 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+fuelIp=`dea getFuelIp` || error_exit "Could not get fuel IP"
+fuelNodeId=`dha getFuelNodeId` || error_exit "Could not get fuel node id"
+
+
+if dha nodeCanZeroMBR $fuelNodeId; then
+ echo "Node $fuelNodeId capable of zeroing MBR so doing that..."
+ dha nodeZeroMBR $fuelNodeId || error_exit "Failed to zero Fuel MBR"
+ dha nodeSetBootOrder $fuelNodeId "disk iso"
+elif dha nodeCanSetBootOrderLive; then
+ echo "Node can change ISO boot order live"
+ dha nodeSetBootOrder $fuelNodeId "iso disk"
+else
+ error_exit "No way to install Fuel node"
+fi
+
+sleep 3
+dha nodeEjectIso $fuelNodeId
+dha nodeInsertIso $fuelNodeId $isofile
+
+sleep 3
+dha nodePowerOn $fuelNodeId
+
+# Switch back boot order to disk, hoping that node is now up
+
+# FIXME: Can we do a smarter and more generic detection of when the
+# FIXME: kickstart procedure has started? Then the dha_waitForIsoBoot
+# FIXME: can be removed. Setting an IP already in the kickstart install
+# FIXME: and ping-wait for that?
+dha waitForIsoBoot
+
+dha nodeSetBootOrder $fuelNodeId "disk iso"
+
+# wait for node up
+echo "Waiting for Fuel master to accept SSH"
+while true
+do
+ ssh root@${fuelIp} date 2>/dev/null
+ if [ $? -eq 0 ]; then
+ break
+ fi
+ sleep 10
+done
+
+# Wait until fuelmenu is up
+echo "Waiting for fuelmenu to come up"
+menuPid=""
+while [ -z "$menuPid" ]
+do
+ menuPid=`ssh root@${fuelIp} "ps -ef" 2>&1 | grep fuelmenu | grep -v grep | awk '{ print $2 }'`
+ sleep 10
+done
+
+# This is where we inject our own astute.yaml settings
+scp -q $deafile root@${fuelIp}:. || error_exit "Could not copy DEA file to Fuel"
+echo "Uploading build tools to Fuel server"
+ssh root@${fuelIp} rm -rf tools || error_exit "Error cleaning old tools structure"
+scp -qrp $topdir/tools root@${fuelIp}:. || error_exit "Error copying tools"
+echo "Running transplant #0"
+ssh root@${fuelIp} "cd tools; ./transplant0.sh ../`basename $deafile`" \
+ || error_exit "Error running transplant sequence #0"
+
+
+
+# Let the Fuel deployment continue
+echo "Found menu as PID $menuPid, now killing it"
+ssh root@${fuelIp} "kill $menuPid" 2>/dev/null
+
+# Wait until installation complete
+echo "Waiting for bootstrap of Fuel node to complete"
+while true
+do
+ ssh root@${fuelIp} "ps -ef" 2>/dev/null \
+ | grep -q /usr/local/sbin/bootstrap_admin_node
+ if [ $? -ne 0 ]; then
+ break
+ fi
+ sleep 10
+done
+
+echo "Waiting for one minute for Fuel to stabilize"
+sleep 1m
diff --git a/fuel/prototypes/deploy/deploy/functions/isolinux.cfg.patch b/fuel/prototypes/deploy/deploy/functions/isolinux.cfg.patch
new file mode 100644
index 0000000..298a057
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/isolinux.cfg.patch
@@ -0,0 +1,14 @@
+*** isolinux/isolinux.cfg.orig 2015-04-15 08:29:52.026868322 -0400
+--- isolinux/isolinux.cfg 2015-04-15 08:30:34.350868343 -0400
+***************
+*** 19,22 ****
+ menu label Fuel Install (^Static IP)
+ menu default
+ kernel vmlinuz
+! append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=no
+--- 19,22 ----
+ menu label Fuel Install (^Static IP)
+ menu default
+ kernel vmlinuz
+! append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=yes
+
diff --git a/fuel/prototypes/deploy/deploy/functions/ks.cfg.patch b/fuel/prototypes/deploy/deploy/functions/ks.cfg.patch
new file mode 100644
index 0000000..1896957
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/ks.cfg.patch
@@ -0,0 +1,19 @@
+*** ks.cfg.orig Wed Apr 15 21:47:09 2015
+--- ks.cfg Wed Apr 15 21:47:24 2015
+***************
+*** 35,41 ****
+ default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
+
+ installdrive="undefined"
+! forceformat="no"
+ for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
+
+ set ${drives} ${removable_drives}
+--- 35,41 ----
+ default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
+
+ installdrive="undefined"
+! forceformat="yes"
+ for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
+
+ set ${drives} ${removable_drives}
diff --git a/fuel/prototypes/deploy/deploy/functions/patch-iso.sh b/fuel/prototypes/deploy/deploy/functions/patch-iso.sh
new file mode 100755
index 0000000..da1996b
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/functions/patch-iso.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This is a temporary script - this should be rolled into a separate
+# build target "make ci-iso" instead!
+
+exit_handler() {
+ rm -Rf $tmpnewdir
+ fusermount -u $tmporigdir 2>/dev/null
+    test -d $tmporigdir && rmdir $tmporigdir
+}
+
+trap exit_handler exit
+
+error_exit() {
+ echo "$@"
+ exit 1
+}
+
+if [ $# -ne 8 ]; then
+ error_exit "Input argument error"
+fi
+
+top=$(cd `dirname $0`; pwd)
+origiso=$(cd `dirname $1`; echo `pwd`/`basename $1`)
+newiso=$(cd `dirname $2`; echo `pwd`/`basename $2`)
+tmpdir=$3
+fuelIp=$4
+fuelNetmask=$5
+fuelGateway=$6
+fuelHostname=$7
+fuelDns=$8
+
+tmporigdir=/${tmpdir}/origiso
+tmpnewdir=/${tmpdir}/newiso
+
+test -f $origiso || error_exit "Could not find origiso $origiso"
+test -d $tmpdir || error_exit "Could not find tmpdir $tmpdir"
+
+
+if [ "`whoami`" != "root" ]; then
+ error_exit "You need be root to run this script"
+fi
+
+echo "Copying..."
+rm -Rf $tmporigdir $tmpnewdir
+mkdir -p $tmporigdir $tmpnewdir
+fuseiso $origiso $tmporigdir || error_exit "Failed fuseiso"
+cd $tmporigdir
+find . | cpio -pd $tmpnewdir
+cd $tmpnewdir
+fusermount -u $tmporigdir
+rmdir $tmporigdir
+chmod -R 755 $tmpnewdir
+
+echo "Patching..."
+cd $tmpnewdir
+# Patch ISO to make it suitable for automatic deployment
+cat $top/ks.cfg.patch | patch -p0 || error_exit "Failed patching ks.cfg"
+rm -rf .rr_moved
+
+# Add dynamic Fuel content
+echo "isolinux.cfg before: `grep netmask isolinux/isolinux.cfg`"
+sed -i "s/ ip=[^ ]*/ ip=$fuelIp/" isolinux/isolinux.cfg
+sed -i "s/ gw=[^ ]*/ gw=$fuelGateway/" isolinux/isolinux.cfg
+sed -i "s/ dns1=[^ ]*/ dns1=$fuelDns/" isolinux/isolinux.cfg
+sed -i "s/ netmask=[^ ]*/ netmask=$fuelNetmask/" isolinux/isolinux.cfg
+sed -i "s/ hostname=[^ ]*/ hostname=$fuelHostname/" isolinux/isolinux.cfg
+sed -i "s/ showmenu=[^ ]*/ showmenu=yes/" isolinux/isolinux.cfg
+echo "isolinux.cfg after: `grep netmask isolinux/isolinux.cfg`"
+
+echo "Creating iso $newiso"
+mkisofs -quiet -r \
+ -J -R -b isolinux/isolinux.bin \
+ -no-emul-boot \
+ -boot-load-size 4 -boot-info-table \
+ --hide-rr-moved \
+ -x "lost+found" -o $newiso . || error_exit "Failed making iso"
+
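
A sketch of running patch-iso.sh on its own, with the argument order the script expects; the paths and addresses are placeholders that mirror the isolinux.cfg defaults, and the script insists on being run as root:

./patch-iso.sh /iso/opnfv.iso /tmp/deploy-opnfv.iso $HOME/fueltmp \
    10.20.0.2 255.255.255.0 10.20.0.1 fuel.domain.tld 8.8.8.8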
diff --git a/fuel/prototypes/deploy/deploy/tools/transplant0.sh b/fuel/prototypes/deploy/deploy/tools/transplant0.sh
new file mode 100755
index 0000000..7c5883b
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/tools/transplant0.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+cleanup () {
+ if [ -n "$tmpDir" ]; then
+ rm -Rf $tmpDir
+ fi
+}
+
+trap cleanup exit
+
+error_exit () {
+ echo "Error: $@" >&2
+ exit 1
+}
+
+tmpDir=`mktemp -d /tmp/deaXXXX`
+
+export PATH=`dirname $0`:$PATH
+
+if [ $# -lt 1 ]; then
+ error_exit "Argument error"
+fi
+deafile=$1
+shift
+
+if [ ! -f "$deafile" ]; then
+ error_exit "Can't find $deafile"
+fi
+
+transplant_fuel_settings.py /etc/fuel/astute.yaml $deafile || \
+ error_exit "Could not transplant astute settings"
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant1.sh b/fuel/prototypes/deploy/deploy/tools/transplant1.sh
index 9cead7a..2401c6f 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant1.sh
+++ b/fuel/prototypes/deploy/deploy/tools/transplant1.sh
@@ -32,10 +32,6 @@ fi
deafile=$1
shift
-if [ $# -ne 0 ]; then
- comment="$@"
-fi
-
if [ ! -f "$deafile" ]; then
error_exit "Can't find $deafile"
fi
@@ -47,16 +43,20 @@ envId=`fuel env | tail -n +3 | grep -v '^$' | awk '{ print $1 }'`
fuel settings --env $envId --download --dir $tmpDir > /dev/null || \
error_exit "Could not get settings"
+
fuel network --env $envId --download --dir $tmpDir > /dev/null || \
error_exit "Could not get network settings"
cp $tmpDir/network_${envId}.yaml network_before.yaml
+
+# Transplant network settings
transplant_network_settings.py $tmpDir/network_${envId}.yaml $deafile || \
error_exit "Could not transplant network settings"
fuel network --env $envId --upload --dir $tmpDir || \
error_exit "Could not update network settings"
cp $tmpDir/network_${envId}.yaml network_after.yaml
+# Transplant settings
cp $tmpDir/settings_${envId}.yaml settings_before.yaml
transplant_settings.py $tmpDir/settings_${envId}.yaml $deafile || \
error_exit "Could not transplant settings"
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant2.sh b/fuel/prototypes/deploy/deploy/tools/transplant2.sh
index 5049f88..46c7a60 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant2.sh
+++ b/fuel/prototypes/deploy/deploy/tools/transplant2.sh
@@ -22,6 +22,15 @@ error_exit () {
exit 1
}
+# Return offset between DEA node id and cluster node id
+getDeaNodeOffset()
+{
+ local baseId
+
+ baseId=`fuel node | tail -n +3 | awk '{ print $1 }' | sed 's/ //g' | sort -n | head -1`
+ echo "$[baseId - 1]"
+}
+
tmpDir=`mktemp -d /tmp/deaXXXX`
export PATH=`dirname $0`:$PATH
@@ -42,7 +51,6 @@ fi
envId=`fuel env | tail -n +3 | grep -v '^$' | awk '{ print $1 }'`
# Phase 1: Graft deployment information
-if [ "a" == "b" ]; then
fuel deployment --env $envId --default --dir $tmpDir || \
error_exit "Could not dump environment"
@@ -50,30 +58,40 @@ for controller in `find $tmpDir -type f | grep -v compute`
do
transplant_network_scheme.py $controller $deaFile controller || \
error_exit "Failed to graft `basename $controller`"
+
+ transplant_opnfv_settings.py $controller $deaFile controller || \
+ error_exit "Failed to graft `basename $controller`"
done
for compute in `find $tmpDir -type f | grep compute`
do
transplant_network_scheme.py $compute $deaFile compute || \
error_exit "Failed to graft `basename $compute`"
+
+ transplant_opnfv_settings.py $compute $deaFile compute || \
+ error_exit "Failed to graft `basename $controller`"
done
fuel deployment --env $envId --upload --dir $tmpDir || \
error_exit "Could not upload environment"
-fi
+
# Phase 2: Graft interface information
+deaOffset=`getDeaNodeOffset`
+echo "DEA offset: $deaOffset"
-for nodeId in `fuel node | grep True | awk '{ print $1}'`
+for clusterNodeId in `fuel node | grep True | awk '{ print $1}'`
do
- echo "Node $nodeId"
- fuel node --node-id $nodeId --network --download --dir $tmpDir || \
- error_exit "Could not download node $nodeId"
+ deaNodeId=$[clusterNodeId - deaOffset]
+ echo "Node $clusterNodeId is $deaNodeId"
+ fuel node --node-id $clusterNodeId --network --download --dir $tmpDir || \
+ error_exit "Could not download node $clusterNodeId"
- transplant_interfaces.py ${tmpDir}/node_${nodeId}/interfaces.yaml $deaFile || \
- error_exit "Failed to graft interfaces"
+ transplant_interfaces.py ${tmpDir}/node_${clusterNodeId}/interfaces.yaml \
+ $deaFile $deaNodeId || \
+ error_exit "Failed to graft interfaces"
- fuel node --node-id $nodeId --network --upload --dir $tmpDir || \
- error_exit "Could not upload node $nodeId"
+ fuel node --node-id $clusterNodeId --network --upload --dir $tmpDir || \
+ error_exit "Could not upload node $clusterNodeId"
done
diff --git a/fuel/prototypes/deploy/deploy/tools/transplant_fuel_settings.py b/fuel/prototypes/deploy/deploy/tools/transplant_fuel_settings.py
new file mode 100755
index 0000000..49ea5e4
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_fuel_settings.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 3:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <astutefile> <deafile>\n")
+ sys.exit(1)
+
+fuelfile = sys.argv[1]
+if not os.path.exists(fuelfile):
+ sys.stderr.write("ERROR: The file "+fuelfile+" could not be opened\n")
+ sys.exit(1)
+
+deafile = sys.argv[2]
+if not os.path.exists(deafile):
+ sys.stderr.write("ERROR: The file "+deafile+" could not be opened\n")
+ sys.exit(1)
+
+f = open(deafile, 'r')
+dea = yaml.load(f)
+f.close()
+
+f = open(fuelfile, 'r')
+fuel = yaml.load(f)
+f.close()
+
+dea = dea["fuel"]
+for property in dea.keys():
+ if property == "ADMIN_NETWORK":
+ for adminproperty in dea[property].keys():
+ fuel[property][adminproperty] = dea[property][adminproperty]
+ else:
+ fuel[property] = dea[property]
+
+f = open(fuelfile, 'w')
+f.write(yaml.dump(fuel, default_flow_style=False))
+f.close()
+
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant_interfaces.py b/fuel/prototypes/deploy/deploy/tools/transplant_interfaces.py
index 8d076ff..758372a 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant_interfaces.py
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_interfaces.py
@@ -14,8 +14,8 @@ import re
import sys
import os
-if len(sys.argv) != 3:
- sys.stderr.write("Usage: "+sys.argv[0]+" <infile> <deafile>\n")
+if len(sys.argv) != 4:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <infile> <deafile> <nodeid>\n")
sys.exit(1)
infile = sys.argv[1]
@@ -28,6 +28,7 @@ if not os.path.exists(deafile):
sys.stderr.write("ERROR: The file "+deafile+" could not be opened\n")
sys.exit(1)
deafile = sys.argv[2]
+nodeid = int(sys.argv[3])
namespace = "interfaces"
@@ -39,6 +40,7 @@ f2 = open(deafile, 'r')
doc2 = yaml.load(f2)
f2.close()
+
# Create lookup table network name -> id for current setup
nwlookup = {}
for interface in doc1:
@@ -46,7 +48,13 @@ for interface in doc1:
networks = []
for network in interface["assigned_networks"]:
nwlookup[network["name"]] = network["id"]
-
+
+# Find network information in DEA for this node
+nodeInfo = {}
+for node in doc2["nodes"]:
+ if node["id"] == nodeid:
+ nodeInfo=node
+ print "Found nodeinfo for node %d" % nodeid
out = {}
out["interfaces"] = {}
@@ -55,7 +63,7 @@ for interface in doc1:
assigned = []
nw = {}
interface["assigned_networks"] = []
- for nwname in doc2["interfaces"][interface["name"]]:
+ for nwname in nodeInfo["interfaces"][interface["name"]]:
iface = {}
iface["id"] = nwlookup[nwname]
iface["name"] = nwname
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant_network_scheme.py b/fuel/prototypes/deploy/deploy/tools/transplant_network_scheme.py
index 7d50cbe..7d50cbe 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant_network_scheme.py
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_network_scheme.py
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant_network_settings.py b/fuel/prototypes/deploy/deploy/tools/transplant_network_settings.py
index c0a46be..c0a46be 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant_network_settings.py
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_network_settings.py
diff --git a/fuel/prototypes/deploy/deploy/tools/transplant_opnfv_settings.py b/fuel/prototypes/deploy/deploy/tools/transplant_opnfv_settings.py
new file mode 100755
index 0000000..00d0950
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_opnfv_settings.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import yaml
+import re
+import sys
+import os
+
+if len(sys.argv) != 4:
+ sys.stderr.write("Usage: "+sys.argv[0]+" <file> <deafile> [compute|controller]\n")
+ sys.exit(1)
+
+file = sys.argv[1]
+if not os.path.exists(file):
+ sys.stderr.write("ERROR: The file "+file+" could not be opened\n")
+ sys.exit(1)
+
+deafile = sys.argv[2]
+namespace = sys.argv[3]
+
+f1 = open(file, 'r')
+doc1 = yaml.load(f1)
+f1.close()
+
+f2 = open(deafile, 'r')
+doc2 = yaml.load(f2)
+f2.close()
+
+doc1["opnfv"] = doc2["opnfv"][namespace]
+
+f2 = open(file, 'w')
+f2.write(yaml.dump(doc1, default_flow_style=False))
+f2.close()
+
diff --git a/fuel/prototypes/libvirt/deploy/tools/transplant_settings.py b/fuel/prototypes/deploy/deploy/tools/transplant_settings.py
index 7f5c0d8..7f5c0d8 100755
--- a/fuel/prototypes/libvirt/deploy/tools/transplant_settings.py
+++ b/fuel/prototypes/deploy/deploy/tools/transplant_settings.py
diff --git a/fuel/prototypes/deploy/deploy/verify_dea.sh b/fuel/prototypes/deploy/deploy/verify_dea.sh
new file mode 100755
index 0000000..2054e9f
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/verify_dea.sh
@@ -0,0 +1,79 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+error_exit()
+{
+ echo "Error: $@"
+ exit 1
+}
+
+if [ $# -ne 1 ]; then
+ echo "Syntax: `basename $0` deafile"
+ exit 1
+fi
+
+if [ ! -f $1 ]; then
+ echo "No such DEA file: $1"
+ exit 1
+fi
+
+tmpdir=$HOME/fueltmp2
+rm -Rf $tmpdir
+mkdir $tmpdir
+
+topdir=$(dirname $(readlink -f $BASH_SOURCE))
+. $topdir/functions/common.sh
+. $topdir/functions/dea-api.sh $1
+
+echo "API version: `dea getApiVersion`"
+
+#echo "Cluster node id for node 1 is: `dea getClusterNodeId 1`"
+
+err=1
+echo "Verifying that expected functions are present..."
+for function in \
+ dea_getApiVersion \
+ dea_getNodeRole \
+ dea_getFuelIp \
+ dea_getFuelNetmask \
+ dea_getFuelGateway \
+ dea_getFuelHostname \
+ dea_getFuelDns \
+ dea_convertMacToShortMac \
+ dea_getProperty \
+ dea_getClusterNodeId \
+ dea
+do
+ if type $function &>/dev/null; then
+ echo "$function: OK"
+ else
+ echo "$function: Missing!"
+ err=0
+ fi
+done
+
+if [ $err -eq 0 ]; then
+ echo "Error in API!"
+ exit 1
+else
+ echo "API functions OK."
+ echo ""
+fi
+
+echo "Fuel IP address: `dea getFuelIp`"
+echo "Fuel netmask: `dea getFuelNetmask`"
+echo "Fuel gateway: `dea getFuelGateway`"
+echo "Fuel hostname: `dea getFuelHostname`"
+echo "Fuel DNS: `dea getFuelDns`"
+echo "Short MAC of 11:22:33:44:55:66: `dea convertMacToShortMac 11:22:33:44:55:66`"
+
+echo "Done"
diff --git a/fuel/prototypes/deploy/deploy/verify_dha.sh b/fuel/prototypes/deploy/deploy/verify_dha.sh
new file mode 100755
index 0000000..5b09721
--- /dev/null
+++ b/fuel/prototypes/deploy/deploy/verify_dha.sh
@@ -0,0 +1,126 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+error_exit()
+{
+ echo "Erroxxxr: $@"
+ exit 1
+}
+
+if [ $# -ne 2 ]; then
+ echo "Syntax: `basename $0` adaptername dhafile"
+ exit 1
+fi
+
+if [ ! -f dha-adapters/${1}.sh ]; then
+ echo "No such adapter file: $1"
+ exit 1
+elif [ ! -f $2 ]; then
+ echo "No such DHA file: $2"
+ exit 1
+fi
+
+. dha-adapters/${1}.sh $2
+
+
+err=1
+echo "Verifying that expected functions are present..."
+for function in \
+ dha_getApiVersion \
+ dha_getAdapterName \
+ dha_getAllNodeIds \
+ dha_getFuelNodeId \
+ dha_getNodeProperty \
+ dha_getNodePxeMac \
+ dha_useFuelCustomInstall \
+ dha_fuelCustomInstall \
+ dha_getPowerOnStrategy \
+ dha_nodePowerOn \
+ dha_nodePowerOff \
+ dha_nodeReset \
+ dha_nodeCanSetBootOrderLive \
+ dha_nodeSetBootOrder \
+ dha_nodeCanSetIso \
+ dha_nodeCanHandeIsoLive \
+ dha_nodeInsertIso \
+ dha_nodeEjectIso \
+ dha_waitForIsoBoot \
+ dha_nodeCanZeroMBR \
+ dha_nodeZeroMBR \
+ dha
+do
+ if type $function &>/dev/null; then
+ echo "$function: OK"
+ else
+ echo "$function: Missing!"
+ err=0
+ fi
+done
+
+
+echo "Adapter API version: `dha getApiVersion`"
+echo "Adapter name: `dha getAdapterName`"
+
+echo "All PXE MAC addresses:"
+for id in `(dha getAllNodeIds) | sort`
+do
+ if [ "`dha getAdapterName`" == "libvirt" ]; then
+ libvirtName=`dha getNodeProperty $id libvirtName`
+ else
+ libvirtName=""
+ fi
+
+ if [ $id == "`dha getFuelNodeId`" ]; then
+ echo "$id: `dha getNodeProperty $id pxeMac` $libvirtName <--- Fuel master"
+ else
+ echo "$id: `dha getNodeProperty $id pxeMac` $libvirtName"
+ fi
+done
+
+
+echo -n "Using Fuel custom install: "
+if dha useFuelCustomInstall; then
+ echo "yes"
+else
+ echo "no"
+fi
+
+
+echo -n "Can set boot order live: "
+if dha nodeCanSetBootOrderLive; then
+ echo "yes"
+else
+ echo "no"
+fi
+
+echo -n "Can operate on ISO media: "
+if dha nodeCanSetIso; then
+ echo "yes"
+else
+ echo "no"
+fi
+
+echo -n "Can insert/eject ISO without power toggle: "
+if dha nodeCanHandeIsoLive; then
+ echo "yes"
+else
+ echo "no"
+fi
+
+echo -n "Can erase the boot disk MBR: "
+if dha nodeCanZeroMBR; then
+ echo "yes"
+else
+ echo "no"
+fi
+
+
+echo "Done"
diff --git a/fuel/prototypes/deploy/documentation/1-introduction.txt b/fuel/prototypes/deploy/documentation/1-introduction.txt
new file mode 100644
index 0000000..c4efed5
--- /dev/null
+++ b/fuel/prototypes/deploy/documentation/1-introduction.txt
@@ -0,0 +1,36 @@
+The structure is being reworked. This page is an introduction to DEA
+and DHA.
+
+Introduction
+
+The aim of the deployment prototype is to try out a (hopefully)
+logical setup to support Fuel deployment on a variety of different
+hardware platforms using a common data format to describe the
+deployment itself and another data format to describe the hardware in
+question.
+
+DEA.yaml The DEA.yaml file describes a Fuel deployment, complete with
+ all settings. The easiest way to create this file is to use
+ the "create_templates.sh" script in an existing deployment to
+ copy its configuration to the DEA.yaml file.
+
+DHA.yaml The DHA.yaml file describes the hardware setup for an
+ installation. This file denotes among other things which DHA
+ adapter to use when deploying Fuel on this hardware setup.
+
+DHA adapter interface: The DHA adapter interface contains a number of
+  function calls available to the automatic Fuel deployer script
+ (deploy.sh). Each adapter creates an implementation of this
+ interface in order for the deployer to orchestrate the
+ installation. There's currently an example DHA adapter "libvirt"
+ that is able to deploy Fuel in a nested KVM environment. Future
+ adapters could support HP C7000, Dell R620 or other types of
+ hardware.
+
+ It is important to note that a certain DHA adapter could implement
+ the dha_fuelCustomInstall() function, which for instance could
+ install the Fuel master as a VM or using PXE.
+
+A typical installation would be kicked off by the following command:
+
+./deploy.sh <isofile to deploy> <dea.yaml> <dha.yaml>
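
For example (file names are placeholders), the DEA and DHA files can be sanity-checked with the verify scripts before starting the deployment as root:

./verify_dea.sh dea.yaml
./verify_dha.sh libvirt dha.yaml
./deploy.sh opnfv.iso dea.yaml dha.yaml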
diff --git a/fuel/prototypes/deploy/documentation/2-dea.txt b/fuel/prototypes/deploy/documentation/2-dea.txt
new file mode 100644
index 0000000..36f805c
--- /dev/null
+++ b/fuel/prototypes/deploy/documentation/2-dea.txt
@@ -0,0 +1,1082 @@
+The structure is being reworked. This page describes the DEA.yaml
+file.
+
+The DEA.yaml file describes an actual Fuel deployment. This YAML file
+can either be edited from an existing template or created from an
+existing deployment by running the "create_templates.sh" script.
+
+The top level fields and their origin
+
+compute: Network translations for the compute nodes (from astute.yaml)
+Hoping that this is sufficient and we don't need to be more granular!
+
+controller: Network translations for the controller nodes (from
+astute.yaml) Hoping that this is sufficient and we don't need to be
+more granular!
+
+created: Creation time for this DEA file.
+
+environment_mode: Environment mode from "fuel env" (ha_compact,
+multinode, ...)
+
+fuel: The networking, DNS and NTP information from the Fuel node
+astute.yaml.
+
+network: The "fuel network" part.
+
+nodes: A data structure describing the role and network configuration
+for all nodes.
+
+opnfv: This structure contains two sub structures "controller" and
+"compute" containing the "opnfv" namespace from their respective
+astute.yaml.
+
+settings: The "fuel settings" part. This is the complete settings,
+thinking it can come in handy for future modifications. I think that
+the "pre_deploy.sh" should be replaced by us customising these
+settings instead (way into the future though).
+
+title: Deployment Environment Adapter (DEA)
+
+version: DEA API to be used for parsing this file. Currently 1.1.
+
+Live example (looooong!)
+
+# DEA API version supported
+version: 1.1
+created: Wed Apr 22 09:43:22 UTC 2015
+comment: Small libvirt deployment
+nodes:
+- id: 1
+ interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+ role: compute
+- id: 2
+ interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+ role: controller
+environment_mode: multinode
+fuel:
+ ADMIN_NETWORK:
+ dhcp_pool_end: 10.20.0.254
+ dhcp_pool_start: 10.20.0.3
+ ipaddress: 10.20.0.2
+ netmask: 255.255.255.0
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ DNS_UPSTREAM: 8.8.8.8
+ FUEL_ACCESS:
+ password: admin
+ user: admin
+ HOSTNAME: fuel
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+controller:
+- action: add-br
+ name: br-eth0
+- action: add-port
+ bridge: br-eth0
+ name: eth0
+- action: add-br
+ name: br-eth1
+- action: add-port
+ bridge: br-eth1
+ name: eth1
+- action: add-br
+ name: br-eth2
+- action: add-port
+ bridge: br-eth2
+ name: eth2
+- action: add-br
+ name: br-eth3
+- action: add-port
+ bridge: br-eth3
+ name: eth3
+- action: add-br
+ name: br-ex
+- action: add-br
+ name: br-mgmt
+- action: add-br
+ name: br-storage
+- action: add-br
+ name: br-fw-admin
+- action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth3
+ - br-ex
+ trunks:
+ - 0
+- action: add-br
+ name: br-prv
+- action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+compute:
+- action: add-br
+ name: br-eth0
+- action: add-port
+ bridge: br-eth0
+ name: eth0
+- action: add-br
+ name: br-eth1
+- action: add-port
+ bridge: br-eth1
+ name: eth1
+- action: add-br
+ name: br-eth2
+- action: add-port
+ bridge: br-eth2
+ name: eth2
+- action: add-br
+ name: br-eth3
+- action: add-port
+ bridge: br-eth3
+ name: eth3
+- action: add-br
+ name: br-mgmt
+- action: add-br
+ name: br-storage
+- action: add-br
+ name: br-fw-admin
+- action: add-patch
+ bridges:
+ - br-eth1
+ - br-storage
+ tags:
+ - 102
+ - 0
+ vlan_ids:
+ - 102
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-mgmt
+ tags:
+ - 101
+ - 0
+ vlan_ids:
+ - 101
+ - 0
+- action: add-patch
+ bridges:
+ - br-eth0
+ - br-fw-admin
+ trunks:
+ - 0
+- action: add-br
+ name: br-prv
+- action: add-patch
+ bridges:
+ - br-eth2
+ - br-prv
+opnfv:
+ compute:
+ dns:
+ compute:
+ - 8.8.8.8
+ - 8.8.4.4
+ controller:
+ - 8.8.8.8
+ - 8.8.4.4
+ hosts:
+ - address: 46.253.206.181
+ fqdn: tor.e1.se
+ name: tor
+ ntp:
+ compute: 'server node-4.domain.tld
+
+ '
+ controller: 'server 0.ubuntu.pool.ntp.org
+
+ server 1.ubuntu.pool.ntp.org
+
+ server 2.ubuntu.pool.ntp.org
+
+ server 3.ubuntu.pool.ntp.org
+
+ '
+ controller:
+ dns:
+ compute:
+ - 8.8.8.8
+ - 8.8.4.4
+ controller:
+ - 8.8.8.8
+ - 8.8.4.4
+ hosts:
+ - address: 46.253.206.181
+ fqdn: tor.e1.se
+ name: tor
+ ntp:
+ compute: 'server node-4.domain.tld
+
+ '
+ controller: 'server 0.ubuntu.pool.ntp.org
+
+ server 1.ubuntu.pool.ntp.org
+
+ server 2.ubuntu.pool.ntp.org
+
+ server 3.ubuntu.pool.ntp.org
+
+ '
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.16.0.130
+ - 172.16.0.254
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: vlan
+ vlan_range:
+ - 1000
+ - 1200
+ networks:
+ - cidr: 172.16.0.0/24
+ gateway: 172.16.0.1
+ ip_ranges:
+ - - 172.16.0.2
+ - 172.16.0.126
+ meta:
+ assign_vip: true
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ assign_vip: true
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 101
+ name: management
+ vlan_start: 101
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ assign_vip: false
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 102
+ - cidr: null
+ gateway: null
+ ip_ranges: []
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 2
+ name: private
+ neutron_vlan_range: true
+ notation: null
+ render_addr_mask: null
+ render_type: null
+ seg_type: vlan
+ use_gateway: false
+ vlan_start: null
+ name: private
+ vlan_start: null
+ - cidr: 10.20.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ assign_vip: false
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: email
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: password
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: text
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ compute_scheduler_driver:
+ label: Scheduler driver
+ type: radio
+ value: nova.scheduler.filter_scheduler.FilterScheduler
+ values:
+ - data: nova.scheduler.filter_scheduler.FilterScheduler
+ description: Currently the most advanced OpenStack scheduler. See the OpenStack
+ documentation for details.
+ label: Filter scheduler
+ - data: nova.scheduler.simple.SimpleScheduler
+ description: This is 'naive' scheduler which tries to find the least loaded
+ host
+ label: Simple scheduler
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ disable_offload:
+ description: If set, generic segmentation offload (gso) and generic receive
+ offload (gro) on physical nics will be disabled. See ethtool man.
+ label: Disable generic offload on physical nics
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+ == 'gre'
+ type: checkbox
+ value: true
+ weight: 80
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ - data: vcenter
+ description: Choose this type of hypervisor if you run OpenStack in a vCenter
+ environment.
+ label: vCenter
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+ == 'neutron'
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart
+ will not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: false
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ type: text
+ value: 8.8.8.8, 8.8.4.4
+ weight: 10
+ metadata:
+ label: Upstream DNS
+ weight: 90
+ external_ntp:
+ metadata:
+ label: Upstream NTP
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP servers list
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to
+ support networking over Mellanox NIC. Mellanox Neutron plugin will not
+ be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ nsx_plugin:
+ connector_type:
+ description: Default network transport type to use
+ label: NSX connector type
+ type: select
+ value: stt
+ values:
+ - data: gre
+ label: GRE
+ - data: ipsec_gre
+ label: GRE over IPSec
+ - data: stt
+ label: STT
+ - data: ipsec_stt
+ label: STT over IPSec
+ - data: bridge
+ label: Bridge
+ weight: 80
+ l3_gw_service_uuid:
+ description: UUID for the default L3 gateway service to use with this cluster
+ label: L3 service UUID
+ regex:
+ error: Invalid L3 gateway service UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 50
+ metadata:
+ enabled: false
+ label: VMware NSX
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+ != 'nsx'
+ weight: 20
+ nsx_controllers:
+ description: One or more IPv4[:port] addresses of NSX controller node, separated
+ by comma (e.g. 10.30.30.2,192.168.110.254:443)
+ label: NSX controller endpoint
+ regex:
+ error: Invalid controller endpoints, specify valid IPv4[:port] pair
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+ type: text
+ value: ''
+ weight: 60
+ nsx_password:
+ description: Password for Administrator
+ label: NSX password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: ''
+ weight: 30
+ nsx_username:
+ description: NSX administrator's username
+ label: NSX username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ packages_url:
+ description: URL to NSX specific packages
+ label: URL to NSX bits
+ regex:
+ error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+ http://10.20.0.2/nsx)
+ source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ type: text
+ value: ''
+ weight: 70
+ replication_mode:
+ description: ''
+ label: NSX cluster has Service nodes
+ type: checkbox
+ value: true
+ weight: 90
+ transport_zone_uuid:
+ description: UUID of the pre-existing default NSX Transport zone
+ label: Transport zone UUID
+ regex:
+ error: Invalid transport zone UUID
+ source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ type: text
+ value: ''
+ weight: 40
+ provision:
+ metadata:
+ label: Provision
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: cobbler
+ values:
+ - data: image
+ description: Copying pre-built images on a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers
+ and zabbix-server only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works
+ best if Ceph is enabled for volumes and images, too. Enables live migration
+ of all types of Ceph backed VMs (without this option, live migration will
+ only work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: checkbox
+ value: false
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ type: checkbox
+ value: false
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter'
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+ and will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage -
+ Ceph OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ restrictions:
+ - settings:common.libvirt_type.value == 'vcenter'
+ type: text
+ value: '2'
+ weight: 85
+ vc_datacenter:
+ description: Inventory path to a datacenter. If you want to use ESXi host
+ as datastore, it should be "ha-datacenter".
+ label: Datacenter name
+ regex:
+ error: Empty datacenter
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 65
+ vc_datastore:
+ description: Datastore associated with the datacenter.
+ label: Datastore name
+ regex:
+ error: Empty datastore
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 60
+ vc_host:
+ description: IP Address of vCenter/ESXi
+ label: vCenter/ESXi IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 45
+ vc_image_dir:
+ description: The name of the directory where the glance images will be stored
+ in the VMware datastore.
+ label: Datastore Images directory
+ regex:
+ error: Empty images directory
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: /openstack_glance
+ weight: 70
+ vc_password:
+ description: vCenter/ESXi admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: password
+ value: ''
+ weight: 55
+ vc_user:
+ description: vCenter/ESXi admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ restrictions:
+ - action: hide
+ condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+ != 'vcenter'
+ type: text
+ value: ''
+ weight: 50
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+ == 'vcenter'
+ type: checkbox
+ value: false
+ weight: 20
+ volumes_lvm:
+ description: Requires at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ volumes_vmdk:
+ description: Configures Cinder to store volumes via VMware vCenter.
+ label: VMware vCenter for volumes (Cinder)
+ restrictions:
+ - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+ == true
+ type: checkbox
+ value: false
+ weight: 15
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ vcenter:
+ cluster:
+ description: vCenter cluster name. If you have multiple clusters, use comma
+ to separate names
+ label: Cluster
+ regex:
+ error: Invalid cluster list
+ source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+ type: text
+ value: ''
+ weight: 40
+ datastore_regex:
+ description: The Datastore regexp setting specifies the data stores to use
+ with Compute. For example, "nas.*". If you want to use all available datastores,
+ leave this field blank
+ label: Datastore regexp
+ regex:
+ error: Invalid datastore regexp
+ source: ^(\S.*\S|\S|)$
+ type: text
+ value: ''
+ weight: 50
+ host_ip:
+ description: IP Address of vCenter
+ label: vCenter IP
+ regex:
+ error: Specify valid IPv4 address
+ source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+ type: text
+ value: ''
+ weight: 10
+ metadata:
+ label: vCenter
+ restrictions:
+ - action: hide
+ condition: settings:common.libvirt_type.value != 'vcenter'
+ weight: 20
+ use_vcenter:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 5
+ vc_password:
+ description: vCenter admin password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 30
+ vc_user:
+ description: vCenter admin username
+ label: Username
+ regex:
+ error: Empty username
+ source: \S
+ type: text
+ value: admin
+ weight: 20
+ vlan_interface:
+ description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+ vmnic1). If empty "vmnic0" is used by default
+ label: ESXi VLAN interface
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+ != 'VlanManager'
+ type: text
+ value: ''
+ weight: 60
+ zabbix:
+ metadata:
+ label: Zabbix Access
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ weight: 70
+ password:
+ description: Password for Zabbix Administrator
+ label: password
+ type: password
+ value: zabbix
+ weight: 20
+ username:
+ description: Username for Zabbix Administrator
+ label: username
+ type: text
+ value: admin
+ weight: 10
+
+
diff --git a/fuel/prototypes/deploy/documentation/3-dha.txt b/fuel/prototypes/deploy/documentation/3-dha.txt
new file mode 100644
index 0000000..d38b6d0
--- /dev/null
+++ b/fuel/prototypes/deploy/documentation/3-dha.txt
@@ -0,0 +1,65 @@
+The structure is being reworked. This page describes the DHA.yaml file.
+
+Below is an example DHA for a libvirt deployment. An actual hardware deployment
+could for instance add additional data fields to the node list, such as:
+
+nodes:
+- id: 1
+ pxeMac: 52:54:00:9c:c2:c9
+ ipmiIp: 192.168.220.1
+ ipmiUser: admin
+ ipmiPassword: ericsson
+ isFuel: true
+
+The important thing is to keep the mandatory fields and add additional
+ones to map to the DHA adapter implementation for the hardware in
+question.
+
+The following example for libvirt is based on what's created by
+create_templates.sh.
+
+Example DHA.yaml file for a libvirt adapter
+
+# DHA API version supported
+version: 1.1
+created: Wed Apr 22 11:34:14 UTC 2015
+comment: Small libvirt deployment
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory fields are id, role and, for the Fuel node, the
+# "isFuel: true" property; isFuel is optional if fuelCustomInstall
+# is set.
+# The MAC address of the PXE boot interface does not have to be set,
+# but the field must be present.
+# All other fields are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 52:54:00:38:c7:8e
+- id: 2
+ pxeMac: 52:54:00:9c:c2:c9
+- id: 3
+ pxeMac: 11:11:11:11:11:11
+ isFuel: true
+
+# Deployment power on strategy
+# all: Turn on all nodes at once. If MAC addresses are set, these
+# will be used for connecting roles to physical nodes; the
+# installation order will be arbitrary.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+# node and wait for the node to be detected by Fuel. Not until
+# the node has been detected and assigned a role will the next
+# node be turned on.
+powerOnStrategy: all
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()" with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+
+fuelCustomInstall: false
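+
+As a sketch of how such adapter-specific fields could be consumed, a
+hardware adapter might look up the IPMI data of a node through
+dha_getNodeProperty() from the DHA adapter API (described on the
+adapter API page). The property names below are only the illustrative
+ones from the node list example above, and ipmitool is just one
+possible tool:
+
+# Sketch only; assumes the adapter-specific ipmiIp/ipmiUser/ipmiPassword
+# fields from the example above and a BMC reachable with ipmitool.
+ipmi_power_on_node () {
+    local nodeid=$1
+    local ip user pass
+    ip=`dha_getNodeProperty $nodeid ipmiIp` || return 1
+    user=`dha_getNodeProperty $nodeid ipmiUser` || return 1
+    pass=`dha_getNodeProperty $nodeid ipmiPassword` || return 1
+    # Power on the blade through its BMC
+    ipmitool -I lanplus -H $ip -U $user -P $pass power on
+}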
diff --git a/fuel/prototypes/deploy/documentation/4-dha-adapter-api.txt b/fuel/prototypes/deploy/documentation/4-dha-adapter-api.txt
new file mode 100644
index 0000000..917d17c
--- /dev/null
+++ b/fuel/prototypes/deploy/documentation/4-dha-adapter-api.txt
@@ -0,0 +1,128 @@
+The structure is being reworked. This page describes the DHA adapter interface.
+
+
+This is the beginning of the documentation of the DHA adapter
+interface, which is auto-generated from the bash implementation of the
+libvirt DHA adapter. So, to some extent, it is still work in progress.
+
+An example run of the ./verify_adapter.sh tool:
+
+sfb@blackbox:~/git/toolbox/opnfv/production/deploy$ ./verify_adapter.sh libvirt.sh dha.yaml
+Adapter init
+dha.yaml
+DHAPARSE: /home/sfb/git/toolbox/opnfv/production/deploy/dha-adapters/dhaParse.py
+DHAFILE: dha.yaml
+Adapter API version: 1.0
+Adapter name: libvirt
+All PXE MAC addresses:
+1: 52:54:00:38:c7:8e
+2: 52:54:00:9c:c2:c9
+Using Fuel custom install: no
+Can set boot order live: no
+Can operate on ISO media: yes
+Can insert/eject ISO without power toggle: yes
+Can erase the boot disk MBR: yes
+Done
+
+
+*** DHA API definition version 1.1 ***
+
+# Get the DHA API version supported by this adapter
+dha_getApiVersion ()
+
+# Get the name of this adapter
+dha_getAdapterName ()
+
+# ### Node identity functions ###
+# Node numbering is sequential.
+# Get a list of all defined node ids, sorted in ascending order
+dha_getAllNodeIds()
+
+# Get the node ID of the Fuel node
+dha_getFuelNodeId()
+
+# Get node property
+# Argument 1: node id
+# Argument 2: Property
+dha_getNodeProperty()
+
+# Get MAC address for the PXE interface of this node. If not
+# defined, an empty string will be returned.
+# Argument 1: Node id
+dha_getNodePxeMac()
+
+# Use custom installation method for Fuel master?
+# Returns 0 if true, 1 if false
+dha_useFuelCustomInstall()
+
+# Fuel custom installation method
+# Leaving the Fuel master powered on and booting from ISO at exit
+# Argument 1: Full path to ISO file to install
+dha_fuelCustomInstall()
+
+# Get power on strategy from DHA
+# Returns one of two values:
+# all: Power on all nodes simultaneously
+# sequence: Power on node by node, wait for Fuel detection
+dha_getPowerOnStrategy()
+
+# Power on node
+# Argument 1: node id
+dha_nodePowerOn()
+
+# Power off node
+# Argument 1: node id
+dha_nodePowerOff()
+
+# Reset node
+# Argument 1: node id
+dha_nodeReset()
+
+# Is the node able to commit boot order without power toggle?
+# Argument 1: node id
+# Returns 0 if true, 1 if false
+dha_nodeCanSetBootOrderLive()
+
+# Set node boot order
+# Argument 1: node id
+# Argument 2: Space separated line of boot order - boot ids are "pxe", "disk" and "iso"
+dha_nodeSetBootOrder()
+
+# Is the node able to operate on ISO media?
+# Argument 1: node id
+# Returns 0 if true, 1 if false
+dha_nodeCanSetIso()
+
+# Is the node able to insert and eject ISO files without power toggle?
+# Argument 1: node id
+# Returns 0 if true, 1 if false
+dha_nodeCanHandeIsoLive()
+
+# Insert ISO into virtualDVD
+# Argument 1: node id
+# Argument 2: iso file
+dha_nodeInsertIso()
+
+# Eject ISO from virtual DVD
+# Argument 1: node id
+dha_nodeEjectIso()
+
+# Wait until a suitable time to change the boot order to
+# "disk iso" when ISO has been booted. Can't be too long, nor
+# too short...
+# We should make a smart trigger for this somehow...
+dha_waitForIsoBoot()
+
+# Is the node able to reset its MBR?
+# Returns 0 if true, 1 if false
+dha_nodeCanZeroMBR()
+
+# Reset the node's MBR
+dha_nodeZeroMBR()
+
+# Entry point for dha functions
+# Typically do not call "dha_nodeZeroMBR" directly but rather "dha nodeZeroMBR"
+# Before calling dha, the adapter file must have been sourced with
+# the DHA file name as argument
+dha()
+
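+As an illustration of the interface, a minimal adapter for a
+libvirt-style backend might implement a few of these functions roughly
+as below. This is only a sketch, not the actual libvirt adapter; it
+assumes a libvirtName property per node (as in the example DHA file),
+reuses dha_getNodeProperty() from the same interface, and expects
+virsh to be available on the host running the deployer:
+
+# Sketch of a partial DHA adapter implementation (libvirt-style backend)
+dha_getApiVersion () {
+    echo "1.1"
+}
+
+dha_getAdapterName () {
+    echo "libvirt-sketch"
+}
+
+dha_nodePowerOn () {
+    local name
+    name=`dha_getNodeProperty $1 libvirtName` || return 1
+    virsh start $name
+}
+
+dha_nodePowerOff () {
+    local name
+    name=`dha_getNodeProperty $1 libvirtName` || return 1
+    virsh destroy $name
+}
+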
diff --git a/fuel/prototypes/deploy/documentation/5-dea-api.txt b/fuel/prototypes/deploy/documentation/5-dea-api.txt
new file mode 100644
index 0000000..d5c6f5c
--- /dev/null
+++ b/fuel/prototypes/deploy/documentation/5-dea-api.txt
@@ -0,0 +1,47 @@
+The structure is being reworked. This page describes the DEA interface.
+
+The DEA API is internal to the deployer, but documented here for information.
+
+*** DEA API definition version 1.1 ***
+
+# Get the DEA API version supported by this adapter
+dea_getApiVersion ()
+
+# Node numbering is sequential.
+# Get the role for this node
+# Argument 1: node id
+dea_getNodeRole()
+
+# Get IP address of Fuel master
+dea_getFuelIp()
+
+# Get netmask of Fuel master
+dea_getFuelNetmask()
+
+# Get gateway address of Fuel master
+dea_getFuelGateway()
+
+# Get hostname of Fuel master
+dea_getFuelHostname()
+
+# Get DNS address of Fuel master
+dea_getFuelDns()
+
+# Convert a normal MAC to a Fuel short mac for --node-id
+dea_convertMacToShortMac()
+
+# Get property from DEA file
+# Argument 1: search path, as e.g. "fuel ADMIN_NETWORK ipaddress"
+dea_getProperty()
+
+# Convert DHA node id to Fuel cluster node id
+# Look for lowest Fuel node number, this will be DHA node 1
+# Argument: node id
+dea_getClusterNodeId()
+
+# Entry point for dea functions
+# Typically do not call "dea_getFuelIp" directly but rather "dea getFuelIp"
+# Before calling dea, the adapter file must have been sourced with
+# the DEA file name as argument
+dea()
+
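+For illustration, deployer-internal usage could look like the sketch
+below (it assumes the DEA adapter file has already been sourced with
+the DEA file name as argument; the property path is the one from the
+DEA example):
+
+# Sketch of deployer-internal DEA usage
+fuelIp=`dea getFuelIp` || exit 1
+fuelDns=`dea getProperty "fuel DNS_UPSTREAM"` || exit 1
+echo "Fuel master will use IP $fuelIp and upstream DNS $fuelDns"
+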
diff --git a/fuel/prototypes/deploy/examples/libvirt/README.txt b/fuel/prototypes/deploy/examples/libvirt/README.txt
new file mode 100644
index 0000000..9cbfa54
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/README.txt
@@ -0,0 +1,25 @@
+This is an example setup for the libvirt DHA adapter which will set up
+four libvirt networks:
+
+fuel1: NATed network for management and admin
+fuel2: Isolated network for storage
+fuel3: Isolated network for private
+fuel4: NATed network for public
+
+Four VMs will be created:
+
+fuel-master
+controller1
+compute4
+compute5
+
+Prerequisite: An Ubuntu 14.x host or later with sudo access.
+
+Start by installing the necessary Ubuntu packages by running
+"sudo ./install-ubuntu-packages.sh".
+
+Then (re)generate the libvirt networks and VM definitions by running
+"sudo ./setup_example_vms.sh <path to storage dir>", where the argument
+is the directory in which the VM disk images will be created.
+
+You can then run deploy.sh with the corresponding dea.yaml and
+dha.yaml which can be found in the conf subdirectory.
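+
+Put together, an example run could look like the following sketch (the
+storage directory and ISO file name are only illustrative, and the
+location of deploy.sh depends on where it lives in your checkout):
+
+sudo ./install-ubuntu-packages.sh
+sudo ./setup_example_vms.sh /var/lib/libvirt/images
+./deploy.sh ~/opnfv.iso conf/dea.yaml conf/dha.yaml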
diff --git a/fuel/prototypes/libvirt/examples/libvirt_dea.yaml b/fuel/prototypes/deploy/examples/libvirt/conf/dea.yaml
index eafbacf..5d2f55f 100644
--- a/fuel/prototypes/libvirt/examples/libvirt_dea.yaml
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/dea.yaml
@@ -1,6 +1,63 @@
-version: 1.0
-created: Tue Apr 14 12:04:01 UTC 2015
-comment: The first experimental libvirt DEA
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+nodes:
+- id: 1
+ interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+ role: controller
+- id: 2
+ interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+ role: compute
+- id: 3
+ interfaces:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+ role: compute
+environment_mode: multinode
+environment_name: opnfv59-b
+fuel:
+ ADMIN_NETWORK:
+ dhcp_pool_end: 10.20.0.254
+ dhcp_pool_start: 10.20.0.3
+ ipaddress: 10.20.0.2
+ netmask: 255.255.255.0
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ DNS_UPSTREAM: 8.8.8.8
+ FUEL_ACCESS:
+ password: admin
+ user: admin
+ HOSTNAME: opnfv59
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
controller:
- action: add-br
name: br-eth0
@@ -127,8 +184,10 @@ compute:
bridges:
- br-eth2
- br-prv
+opnfv:
+ compute: {}
+ controller: {}
network:
- management_vip: 192.168.0.2
networking_parameters:
base_mac: fa:16:3e:00:00:00
dns_nameservers:
@@ -146,7 +205,7 @@ network:
segmentation_type: vlan
vlan_range:
- 1000
- - 1200
+ - 1030
networks:
- cidr: 172.16.0.0/24
gateway: 172.16.0.1
@@ -173,7 +232,7 @@ network:
- cidr: 192.168.0.0/24
gateway: null
ip_ranges:
- - - 192.168.0.2
+ - - 192.168.0.1
- 192.168.0.254
meta:
assign_vip: true
@@ -191,7 +250,7 @@ network:
- cidr: 192.168.1.0/24
gateway: null
ip_ranges:
- - - 192.168.1.2
+ - - 192.168.1.1
- 192.168.1.254
meta:
assign_vip: false
@@ -239,7 +298,6 @@ network:
use_gateway: true
name: fuelweb_admin
vlan_start: null
- public_vip: 172.16.0.2
settings:
editable:
access:
@@ -281,7 +339,7 @@ settings:
description: If selected, Ceilometer component will be installed
label: Install Ceilometer
type: checkbox
- value: true
+ value: false
weight: 40
heat:
description: ''
@@ -396,7 +454,7 @@ settings:
will not be attempted.
label: Resume guests state on host boot
type: checkbox
- value: false
+ value: true
weight: 60
use_cow_images:
description: For most cases you will want qcow format. If it's disabled, raw
@@ -782,7 +840,7 @@ settings:
restrictions:
- settings:storage.volumes_ceph.value == true
type: checkbox
- value: false
+ value: true
weight: 10
volumes_vmdk:
description: Configures Cinder to store volumes via VMware vCenter.
@@ -874,7 +932,7 @@ settings:
error: Empty password
source: \S
type: password
- value: ''
+ value: admin
weight: 30
vc_user:
description: vCenter admin username
@@ -883,7 +941,7 @@ settings:
error: Empty username
source: \S
type: text
- value: ''
+ value: admin
weight: 20
vlan_interface:
description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
@@ -915,13 +973,3 @@ settings:
type: text
value: admin
weight: 10
-interfaces:
- eth0:
- - fuelweb_admin
- - management
- eth1:
- - storage
- eth2:
- - private
- eth3:
- - public
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/dha.yaml b/fuel/prototypes/deploy/examples/libvirt/conf/dha.yaml
new file mode 100644
index 0000000..66395e8
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory fields are id and role.
+# The MAC address of the PXE boot interface does not have to be set,
+# but the field must be present.
+# All other fields are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 52:54:00:aa:dd:84
+ libvirtName: controller1
+- id: 2
+ pxeMac: 52:54:00:41:64:f3
+ libvirtName: compute4
+- id: 3
+ pxeMac: 52:54:00:69:a0:79
+ libvirtName: compute5
+- id: 4
+ pxeMac: 52:54:00:f8:b0:75
+ libvirtName: fuel-master
+ isFuel: yes
+
+# Deployment power on strategy
+# all: Turn on all nodes at once. There will be no correlation
+# between the DHA and DEA node numbering. MAC addresses
+# will be used to select the node roles though.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+# node and wait for the node to be detected by Fuel. Not until
+# the node has been detected and assigned a role will the next
+# node be turned on.
+powerOnStrategy: all
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()" with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+
+fuelCustomInstall: false
+
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel1 b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel1
new file mode 100644
index 0000000..7b2b154
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel1
@@ -0,0 +1,12 @@
+<network>
+ <name>fuel1</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='fuel1' stp='on' delay='0'/>
+ <ip address='10.20.0.1' netmask='255.255.255.0'>
+ </ip>
+</network>
+
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel2 b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel2
new file mode 100644
index 0000000..615c920
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel2
@@ -0,0 +1,5 @@
+<network>
+ <name>fuel2</name>
+ <bridge name='fuel2' stp='on' delay='0'/>
+</network>
+
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel3 b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel3
new file mode 100644
index 0000000..2383e6c
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel3
@@ -0,0 +1,5 @@
+<network>
+ <name>fuel3</name>
+ <bridge name='fuel3' stp='on' delay='0'/>
+</network>
+
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel4 b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel4
new file mode 100644
index 0000000..5b69f91
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/networks/fuel4
@@ -0,0 +1,12 @@
+<network>
+ <name>fuel4</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <bridge name='fuel4' stp='on' delay='0'/>
+ <ip address='172.16.0.1' netmask='255.255.255.0'>
+ </ip>
+</network>
+
diff --git a/fuel/prototypes/libvirt/examples/vms/compute4 b/fuel/prototypes/deploy/examples/libvirt/conf/vms/compute4
index ec98eab..099c21e 100644
--- a/fuel/prototypes/libvirt/examples/vms/compute4
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/vms/compute4
@@ -1,5 +1,6 @@
<domain type='kvm'>
<name>compute4</name>
+ <uuid>6206efbf-18e1-492e-951a-60eda6676ef5</uuid>
<memory unit='KiB'>8388608</memory>
<currentMemory unit='KiB'>8388608</currentMemory>
<vcpu placement='static'>2</vcpu>
@@ -58,21 +59,25 @@
</controller>
<controller type='pci' index='0' model='pci-root'/>
<interface type='network'>
+ <mac address='52:54:00:41:64:f3'/>
<source network='fuel1'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:67:00:5e'/>
<source network='fuel2'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:3e:dd:0d'/>
<source network='fuel3'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:a6:3c:32'/>
<source network='fuel4'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
diff --git a/fuel/prototypes/libvirt/examples/vms/compute5 b/fuel/prototypes/deploy/examples/libvirt/conf/vms/compute5
index 411be64..76569e0 100644
--- a/fuel/prototypes/libvirt/examples/vms/compute5
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/vms/compute5
@@ -1,5 +1,6 @@
<domain type='kvm'>
<name>compute5</name>
+ <uuid>2c1e50fa-110f-4ab7-98ff-7c5e08c8f38f</uuid>
<memory unit='KiB'>8388608</memory>
<currentMemory unit='KiB'>8388608</currentMemory>
<vcpu placement='static'>2</vcpu>
@@ -57,21 +58,25 @@
</controller>
<controller type='pci' index='0' model='pci-root'/>
<interface type='network'>
+ <mac address='52:54:00:69:a0:79'/>
<source network='fuel1'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:58:4c:df'/>
<source network='fuel2'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:27:54:1d'/>
<source network='fuel3'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:84:b8:c4'/>
<source network='fuel4'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
diff --git a/fuel/prototypes/libvirt/examples/vms/controller1 b/fuel/prototypes/deploy/examples/libvirt/conf/vms/controller1
index f82ad28..715d4c4 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller1
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/vms/controller1
@@ -1,5 +1,6 @@
<domain type='kvm'>
<name>controller1</name>
+ <uuid>ce9897c1-d9e7-452c-b998-9fad0cf5b9db</uuid>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
@@ -57,21 +58,25 @@
</controller>
<controller type='pci' index='0' model='pci-root'/>
<interface type='network'>
+ <mac address='52:54:00:aa:dd:84'/>
<source network='fuel1'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:e4:6e:d9'/>
<source network='fuel2'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:60:88:73'/>
<source network='fuel3'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</interface>
<interface type='network'>
+ <mac address='52:54:00:f0:b2:0f'/>
<source network='fuel4'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
diff --git a/fuel/prototypes/deploy/examples/libvirt/conf/vms/fuel-master b/fuel/prototypes/deploy/examples/libvirt/conf/vms/fuel-master
new file mode 100644
index 0000000..9ff8017
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/conf/vms/fuel-master
@@ -0,0 +1,103 @@
+<domain type='kvm'>
+ <name>fuel-master</name>
+ <uuid>caabab80-b42f-4f9d-9c0d-18f274c4cb58</uuid>
+ <memory unit='KiB'>2097152</memory>
+ <currentMemory unit='KiB'>2097152</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <resource>
+ <partition>/machine</partition>
+ </resource>
+ <os>
+ <type arch='x86_64' machine='pc-1.0'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ <bootmenu enable='no'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='custom' match='exact'>
+ <model fallback='allow'>SandyBridge</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='vme'/>
+ <feature policy='require' name='dtes64'/>
+ <feature policy='require' name='vmx'/>
+ <feature policy='require' name='erms'/>
+ <feature policy='require' name='xtpr'/>
+ <feature policy='require' name='smep'/>
+ <feature policy='require' name='pcid'/>
+ <feature policy='require' name='est'/>
+ <feature policy='require' name='monitor'/>
+ <feature policy='require' name='smx'/>
+ <feature policy='require' name='tm'/>
+ <feature policy='require' name='acpi'/>
+ <feature policy='require' name='osxsave'/>
+ <feature policy='require' name='ht'/>
+ <feature policy='require' name='pdcm'/>
+ <feature policy='require' name='fsgsbase'/>
+ <feature policy='require' name='f16c'/>
+ <feature policy='require' name='ds'/>
+ <feature policy='require' name='tm2'/>
+ <feature policy='require' name='ss'/>
+ <feature policy='require' name='pbe'/>
+ <feature policy='require' name='ds_cpl'/>
+ <feature policy='require' name='rdrand'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm</emulator>
+ <disk type='block' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <target dev='hdc' bus='ide'/>
+ <readonly/>
+ <address type='drive' controller='0' bus='1' target='0' unit='0'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='disk.raw'/>
+ <target dev='vda' bus='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </disk>
+ <controller type='ide' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+ </controller>
+ <controller type='usb' index='0'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+ </controller>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <mac address='52:54:00:f8:b0:75'/>
+ <source network='fuel1'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='sv'>
+ <listen type='address' address='127.0.0.1'/>
+ </graphics>
+ <sound model='ich6'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </sound>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ <memballoon model='virtio'>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+ </memballoon>
+ </devices>
+ <seclabel type='dynamic' model='apparmor' relabel='yes'/>
+</domain>
+
diff --git a/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh b/fuel/prototypes/deploy/examples/libvirt/install-ubuntu-packages.sh
index 58ce19a..58ce19a 100755
--- a/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh
+++ b/fuel/prototypes/deploy/examples/libvirt/install-ubuntu-packages.sh
diff --git a/fuel/prototypes/deploy/examples/libvirt/setup_example_vms.sh b/fuel/prototypes/deploy/examples/libvirt/setup_example_vms.sh
new file mode 100755
index 0000000..e0388ac
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/setup_example_vms.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+exit_handler() {
+ rm $tmpfile
+}
+
+
+error_exit () {
+ echo "$@"
+ exit 1
+}
+
+trap exit_handler EXIT
+
+# You can change these disk sizes to adapt to your needs
+fueldisk="30G"
+controllerdisk="20G"
+computedisk="20G"
+
+
+topdir=$(dirname $(readlink -f $BASH_SOURCE))
+netdir=$topdir/conf/networks
+vmdir=$topdir/conf/vms
+tmpfile=`mktemp /tmp/XXXXX`
+
+if [ ! -d $netdir ]; then
+ error_exit "No net directory $netdir"
+ exit 1
+elif [ ! -d $vmdir ]; then
+ error_exit "No VM directory $vmdir"
+ exit 1
+fi
+
+if [ $# -ne 1 ]; then
+ echo "Argument error."
+ echo "`basename $0` <path to storage dir>"
+ exit 1
+fi
+
+if [ "`whoami`" != "root" ]; then
+ error_exit "You need be root to run this script"
+fi
+
+echo "Cleaning up"
+$topdir/tools/cleanup_example_vms.sh
+
+storagedir=$1
+
+if [ ! -d $storagedir ]; then
+ error_exit "Could not find storagedir directory $storagedir"
+fi
+
+# Create storage space and patch it in
+for vm in $vmdir/*
+do
+ vmname=`basename $vm`
+ virsh dumpxml $vmname >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Found vm $vmname, deleting"
+ virsh destroy $vmname
+ virsh undefine $vmname
+ sleep 10
+
+ fi
+
+
+ storage=${storagedir}/`basename ${vm}`.raw
+ if [ -f ${storage} ]; then
+ echo "Storage already present, removing: $storage"
+ rm $storage
+ fi
+
+ echo `basename $vm` | grep -q fuel-master && size=$fueldisk
+ echo `basename $vm` | grep -q controller && size=$controllerdisk
+ echo `basename $vm` | grep -q compute && size=$computedisk
+
+ echo "Creating ${size} GB of storage in ${storage}"
+ fallocate -l ${size} ${storage} || \
+ error_exit "Could not create storage"
+ sed "s:<source file='disk.raw':<source file='${storage}':" $vm >$tmpfile
+ virsh define $tmpfile
+done
+
+for net in $netdir/*
+do
+ netname=`basename $net`
+ virsh net-dumpxml $netname >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Found net $netname, deleting"
+ virsh net-destroy $netname
+ virsh net-undefine $netname
+ fi
+ virsh net-define $net
+ virsh net-autostart $netname
+ virsh net-start $netname
+done
diff --git a/fuel/prototypes/deploy/examples/libvirt/tools/cleanup_example_vms.sh b/fuel/prototypes/deploy/examples/libvirt/tools/cleanup_example_vms.sh
new file mode 100755
index 0000000..960b21e
--- /dev/null
+++ b/fuel/prototypes/deploy/examples/libvirt/tools/cleanup_example_vms.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+error_exit () {
+ echo "$@"
+ exit 1
+}
+
+topdir=$(cd $(dirname $(readlink -f $BASH_SOURCE)); cd ..; pwd)
+netdir=$topdir/conf/networks
+vmdir=$topdir/conf/vms
+
+if [ ! -d $netdir ]; then
+ error_exit "No net directory $netdir"
+ exit 1
+elif [ ! -d $vmdir ]; then
+ error_exit "No VM directory $vmdir"
+ exit 1
+fi
+
+
+if [ "`whoami`" != "root" ]; then
+ error_exit "You need be root to run this script"
+fi
+
+for vm in $vmdir/*
+do
+ vmname=`basename $vm`
+ virsh dumpxml $vmname >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ diskfile=`virsh dumpxml $vmname | grep "<source file=" | grep raw | \
+ sed "s/.*<source file='\(.*\)'.*/\1/"`
+ echo "Removing $vmname with disk $diskfile"
+ virsh destroy $vmname 2>/dev/null
+ virsh undefine $vmname
+ rm -f $diskfile
+ fi
+done
+
+for net in $netdir/*
+do
+ netname=`basename $net`
+ virsh net-dumpxml $netname >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Found net $netname, deleting"
+ virsh net-destroy $netname
+ virsh net-undefine $netname
+ fi
+done
diff --git a/fuel/prototypes/libvirt/setup_vms/dump_setup.sh b/fuel/prototypes/deploy/examples/libvirt/tools/dump_setup.sh
index b65cf31..cdd029f 100755
--- a/fuel/prototypes/libvirt/setup_vms/dump_setup.sh
+++ b/fuel/prototypes/deploy/examples/libvirt/tools/dump_setup.sh
@@ -9,46 +9,55 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-netdir='../examples/networks'
-vmdir='../examples/vms'
+topdir=$(cd $(dirname $(readlink -f $BASH_SOURCE)); cd ..; pwd)
+netdir=$topdir/conf/networks
+vmdir=$topdir/conf/vms
+vms="fuel-master controller1 compute4 compute5"
+networks="fuel1 fuel2 fuel3 fuel4"
-if [ -d $netdir ]; then
- echo "Net directory already present"
- exit 1
-elif [ -d $vmdir ]; then
- echo "VM directory already present"
- exit 1
+
+if [ "`whoami`" != "root" ]; then
+ error_exit "You need be root to run this script"
fi
mkdir -p $netdir
mkdir -p $vmdir
-# Check that no VM is up
-if virsh list | egrep -q "fuel-master|controller|compute" ; then
- echo "Can't dump while VMs are up:"
- virsh list | egrep "fuel-master|controller|compute"
+if [ `ls -1 $netdir/ | wc -l` -ne 0 ]; then
+ echo "There are files in $netdir already!"
+ exit 1
+elif [ `ls -1 $vmdir/ | wc -l` -ne 0 ]; then
+ echo "There are files in $vmdir already!"
exit 1
fi
+
+# Check that no VM is up
+for vm in $vms
+do
+ if [ "`virsh domstate $vm`" == "running" ]; then
+ echo "Can't dump while VM are up: $vm"
+ exit 1
+ fi
+done
+
# Dump all networks in the fuell* namespace
-for net in `virsh net-list --all | tail -n+3 | awk '{ print $1 }' | grep fuel`
+for net in $networks
do
virsh net-dumpxml $net > $netdir/$net
done
# Dump all fuel-master, compute* and controller* VMs
-for vm in `virsh list --all | tail -n+3 | awk '{ print $2 }' | egrep 'fuel-master|compute|controller'`
+for vm in $vms
do
virsh dumpxml $vm > $vmdir/$vm
done
-# Remove all attached ISOs, generalize the rest of the setup
+# Remove all attached ISOs, generalize disk file
for vm in $vmdir/*
do
sed -i '/.iso/d' $vm
sed -i "s/<source file='.*raw'/<source file='disk.raw'/" $vm
- sed -i '/<uuid/d' $vm
- sed -i '/<mac/d' $vm
done
# Generalize all nets
diff --git a/fuel/docs/src/tmp/BUILD/README.architecture b/fuel/prototypes/deploy/list_fixmes.sh
index 4629763..651c478 100644..100755
--- a/fuel/docs/src/tmp/BUILD/README.architecture
+++ b/fuel/prototypes/deploy/list_fixmes.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
# stefan.k.berg@ericsson.com
@@ -8,3 +9,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+for file in `find . -type f -exec egrep -il "FIXME|TODO" {} \; \
+ | grep -v list_fixmes.sh \
+ | grep -v TODO.txt`
+do
+ echo "***** Things to fix in $file *****"
+ egrep -i "FIXME|TODO" $file
+ echo ""
+done
diff --git a/fuel/prototypes/libvirt/create_dea/create_dea.sh b/fuel/prototypes/libvirt/create_dea/create_dea.sh
deleted file mode 100755
index 87587a5..0000000
--- a/fuel/prototypes/libvirt/create_dea/create_dea.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-cleanup () {
- if [ -n "$tmpDir" ]; then
- rm -Rf $tmpDir
- fi
-}
-
-trap cleanup exit
-
-error_exit () {
- echo "Error: $@" >&2
- exit 1
-}
-
-tmpDir=`mktemp -d /tmp/deaXXXX`
-
-export PATH=`dirname $0`:$PATH
-
-if [ $# -lt 1 ]; then
- error_exit "Argument error"
-fi
-deafile=$1
-shift
-
-if [ $# -ne 0 ]; then
- comment="$@"
-fi
-
-if [ -f "$deafile" ]; then
- error_exit "$deafile already exists"
-fi
-
-if [ `fuel env | tail -n +3 | grep -v '^$' | wc -l` -ne 1 ]; then
- error_exit "Not exactly one environment"
-fi
-envId=`fuel env | tail -n +3 | grep -v '^$' | awk '{ print $1 }'`
-
-computeId=`fuel node | grep compute | grep True | head -1 | awk '{ print $1}'`
-controllerId=`fuel node | grep controller | grep True | head -1 | awk '{ print $1}'`
-
-if [ -z "$computeId" ]; then
- error_exit "Could not find any compute node"
-elif [ -z "$controllerId" ]; then
- error_exit "Could not find any controller node"
-fi
-
-fuel deployment --env $envId --download --dir $tmpDir > /dev/null || \
- error_exit "Could not get deployment info"
-fuel settings --env $envId --download --dir $tmpDir > /dev/null || \
- error_exit "Could not get settings"
-fuel network --env $envId --download --dir $tmpDir > /dev/null || \
- error_exit "Could not get network settings"
-
-echo "version: 1.0" > $deafile
-echo "created: `date`" >> $deafile
-if [ -n "$comment" ]; then
- echo "comment: $comment" >> $deafile
-fi
-
-reap_network_scheme.py $tmpDir/deployment_${envId}/*controller_${controllerId}.yaml $deafile controller || \
- error_exit "Could not extract network scheme for controller"
-reap_network_scheme.py $tmpDir/deployment_${envId}/compute_${computeId}.yaml $deafile compute || \
- error_exit "Could not extract network scheme for controller"
-reap_network_settings.py $tmpDir/network_${envId}.yaml $deafile network || \
- error_exit "Could not extract network settings"
-reap_settings.py $tmpDir/settings_${envId}.yaml $deafile settings || \
- error_exit "Could not extract settings"
-
-fuel node --node-id $controllerId --network --download --dir $tmpDir || \
- error_exit "Could not get network info for node $controllerId"
-reap_interfaces.py $tmpDir/node_${controllerId}/interfaces.yaml $deafile || \
- error_exit "Could not extract interfaces"
-
-
-echo "DEA file is available at $deafile"
-
diff --git a/fuel/prototypes/libvirt/deploy/functions/deploy_env.sh b/fuel/prototypes/libvirt/deploy/functions/deploy_env.sh
deleted file mode 100755
index 6fb26c4..0000000
--- a/fuel/prototypes/libvirt/deploy/functions/deploy_env.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Deploy!
-
-scp -q $deafile root@10.20.0.2:. || error_exit "Could not copy DEA file to Fuel"
-echo "Uploading build tools to Fuel server"
-ssh root@10.20.0.2 rm -rf tools || error_exit "Error cleaning old tools structure"
-scp -qrp $topdir/tools root@10.20.0.2:. || error_exit "Error copying tools"
-
-# Refuse to run if environment already present
-envcnt=`fuel env | tail -n +3 | grep -v '^$' | wc -l`
-if [ $envcnt -ne 0 ]; then
- error_exit "Environment count is $envcnt"
-fi
-
-# Refuse to run if any nodes are up
-nodeCnt=`noNodesUp`
-if [ $nodeCnt -ne 0 ]; then
- error_exit "Nodes are up (node count: $nodeCnt)"
-fi
-
-# Extract release ID for Ubuntu environment
-ubuntucnt=`fuel release | grep Ubuntu | wc -l`
-if [ $ubuntucnt -ne 1 ]; then
- error_exit "Not exacly one Ubuntu release found"
-fi
-
-ubuntuid=`fuel release | grep Ubuntu | awk '{ print $1 }'`
-
-# Create environment
-fuel env create --name Foobar --rel $ubuntuid --mode ha --network-mode neutron --net-segment-type vlan || error_exit "Error creating environment"
-envId=`ssh root@10.20.0.2 fuel env | tail -n +3 | awk '{ print $1 }'` || error_exit "Could not get environment id"
-
-
-echo "Running transplant #1"
-ssh root@10.20.0.2 "cd tools; ./transplant1.sh ../`basename $deafile`" || error_exit "Error running transplant sequence #1"
-
-# Spin up VMs
-for node in controller1 controller2 controller3 compute4 compute5
-do
- echo "Starting VM $node"
- virsh start $node >/dev/null 2>&1 || error_exit "Could not virsh start $node"
- sleep 10
-done
-
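-# Wait for Fuel to detect each node, then assign its roles: controller and mongo for the controllers, compute for the computes.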
-for node in controller1 controller2 controller3
-do
- echo -n "Waiting for Fuel to detect $node"
- waitForHost $node
- echo "Setting role for $node"
- fuel node set --node-id `getNodeId $node` --role controller,mongo --env $envId || error_exit "Could not set role for $node"
-done
-
-for node in compute4 compute5
-do
- echo -n "Waiting for Fuel to detect $node"
- waitForHost $node
- echo "Setting role for $node"
- fuel node set --node-id `getNodeId $node` --role compute --env $envId || error_exit "Could not set role for $node"
-done
-
-# Inject node network config
-echo "Running transplant #2"
-ssh root@10.20.0.2 "cd tools; ./transplant2.sh ../`basename $deafile`" || error_exit "Error running transplant sequence #2"
-
-# Run pre-deploy with default input
-# Need to set terminal as script does "clear"
-ssh root@10.20.0.2 "TERM=vt100 /opt/opnfv/pre-deploy.sh < /dev/null" || error_exit "Pre-deploy failed"
-
-# Deploy
-echo "Deploying!"
-ssh root@10.20.0.2 "fuel deploy-changes --env $envId" >/dev/null 2>&1 || error_exit "Deploy failed"
-echo "Deployment completed"