author    Jonas Bjurel <jonas.bjurel@ericsson.com>  2015-10-03 17:34:24 +0200
committer Jonas Bjurel <jonas.bjurel@ericsson.com>  2015-10-03 17:34:24 +0200
commit    543130927ba40a174e8674cca66ae442d5056d76 (patch)
tree      ee497cf48d8f31e998b48c6ae54fc625b852b20d
parent    11dbe27afb96c5b54b9f4f0a1c8b21194f59dc7b (diff)

    Moving tag arno.2015.2.0 from genesis to fuel/stable/arno  (refs: arno.2015.2.0, stable/arno)

    Change-Id: I01b5f9f9125756d80d7ca666bb6d994f2b13d2a0
    Signed-off-by: Jonas Bjurel <jonas.bjurel@ericsson.com>
-rwxr-xr-x common/ci/clean.sh | 326
-rwxr-xr-x common/ci/setup.sh | 270
-rw-r--r-- common/docs/user-guide.rst | 19
-rw-r--r-- common/puppet-opnfv/manifests/compute.pp | 43
-rw-r--r-- common/puppet-opnfv/manifests/controller_networker.pp | 92
-rw-r--r-- common/puppet-opnfv/manifests/external_net_presetup.pp | 15
-rw-r--r-- common/puppet-opnfv/manifests/external_net_setup.pp | 5
-rw-r--r-- common/puppet-opnfv/manifests/init.pp | 1
-rw-r--r-- common/puppet-opnfv/manifests/odl_docker.pp | 50
-rw-r--r-- common/puppet-opnfv/manifests/templates/br_ex.erb | 10
-rw-r--r-- common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile | 82
-rw-r--r-- common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh | 18
-rw-r--r-- common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh | 20
-rw-r--r-- common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh | 48
-rwxr-xr-x compass/ci/build.sh | 26
-rwxr-xr-x compass/ci/deploy.sh | 14
-rwxr-xr-x compass/ci/launch.sh | 65
-rwxr-xr-x compass/ci/log.sh | 22
-rw-r--r-- compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml | 42
-rw-r--r-- compass/deploy/ansible/openstack_juno/allinone.yml | 38
-rw-r--r-- compass/deploy/ansible/openstack_juno/compute.yml | 9
-rw-r--r-- compass/deploy/ansible/openstack_juno/controller.yml | 15
-rw-r--r-- compass/deploy/ansible/openstack_juno/group_vars/all | 54
-rw-r--r-- compass/deploy/ansible/openstack_juno/multinodes.yml | 75
-rw-r--r-- compass/deploy/ansible/openstack_juno/network.yml | 8
-rw-r--r-- compass/deploy/ansible/openstack_juno/single-controller.yml | 38
-rw-r--r-- compass/deploy/ansible/openstack_juno/storage.yml | 8
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/handlers/main.yml | 6
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml | 20
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml | 20
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/tasks/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini | 71
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf | 63
-rw-r--r-- compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh | 6
-rw-r--r-- compass/deploy/ansible/roles/cinder-volume/files/loop.yml | 1
-rw-r--r-- compass/deploy/ansible/roles/cinder-volume/handlers/main.yml | 3
-rw-r--r-- compass/deploy/ansible/roles/cinder-volume/tasks/main.yml | 55
-rw-r--r-- compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf | 62
-rw-r--r-- compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list | 1
-rw-r--r-- compass/deploy/ansible/roles/common/tasks/main.yml | 28
-rw-r--r-- compass/deploy/ansible/roles/common/templates/hosts | 22
-rw-r--r-- compass/deploy/ansible/roles/common/templates/ntp.conf | 56
-rw-r--r-- compass/deploy/ansible/roles/dashboard/tasks/main.yml | 30
-rw-r--r-- compass/deploy/ansible/roles/dashboard/templates/local_settings.py | 511
-rw-r--r-- compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf | 14
-rw-r--r-- compass/deploy/ansible/roles/database/files/my.cnf | 131
-rw-r--r-- compass/deploy/ansible/roles/database/tasks/main.yml | 12
-rw-r--r-- compass/deploy/ansible/roles/database/tasks/mariadb.yml | 46
-rw-r--r-- compass/deploy/ansible/roles/database/tasks/mysql.yml | 22
-rw-r--r-- compass/deploy/ansible/roles/database/templates/data.j2 | 39
-rw-r--r-- compass/deploy/ansible/roles/database/templates/my.cnf | 134
-rw-r--r-- compass/deploy/ansible/roles/database/templates/wsrep.cnf | 126
-rw-r--r-- compass/deploy/ansible/roles/glance/handlers/main.yml | 6
-rw-r--r-- compass/deploy/ansible/roles/glance/tasks/glance_config.yml | 29
-rw-r--r-- compass/deploy/ansible/roles/glance/tasks/glance_install.yml | 26
-rw-r--r-- compass/deploy/ansible/roles/glance/tasks/main.yml | 18
-rw-r--r-- compass/deploy/ansible/roles/glance/tasks/nfs.yml | 41
-rw-r--r-- compass/deploy/ansible/roles/glance/templates/glance-api.conf | 677
-rw-r--r-- compass/deploy/ansible/roles/glance/templates/glance-registry.conf | 190
-rw-r--r-- compass/deploy/ansible/roles/glance/templates/image_upload.sh | 2
-rw-r--r-- compass/deploy/ansible/roles/ha/files/galera_chk | 10
-rw-r--r-- compass/deploy/ansible/roles/ha/files/mysqlchk | 15
-rw-r--r-- compass/deploy/ansible/roles/ha/files/notify.sh | 4
-rw-r--r-- compass/deploy/ansible/roles/ha/handlers/main.yml | 9
-rw-r--r-- compass/deploy/ansible/roles/ha/tasks/main.yml | 94
-rw-r--r-- compass/deploy/ansible/roles/ha/templates/failover.j2 | 65
-rw-r--r-- compass/deploy/ansible/roles/ha/templates/haproxy.cfg | 133
-rw-r--r-- compass/deploy/ansible/roles/ha/templates/keepalived.conf | 42
-rw-r--r-- compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml | 16
-rw-r--r-- compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml | 29
-rw-r--r-- compass/deploy/ansible/roles/keystone/tasks/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh | 6
-rw-r--r-- compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh | 5
-rw-r--r-- compass/deploy/ansible/roles/keystone/templates/keystone.conf | 1317
-rw-r--r-- compass/deploy/ansible/roles/keystone/templates/keystone_init | 43
-rw-r--r-- compass/deploy/ansible/roles/monitor/files/check_service.sh | 7
-rw-r--r-- compass/deploy/ansible/roles/monitor/files/root | 1
-rw-r--r-- compass/deploy/ansible/roles/monitor/tasks/main.yml | 11
-rw-r--r-- compass/deploy/ansible/roles/mq/tasks/main.yml | 5
-rw-r--r-- compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml | 45
-rw-r--r-- compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml | 27
-rw-r--r-- compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf | 1
-rw-r--r-- compass/deploy/ansible/roles/neutron-common/handlers/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/defaults/main.yml | 2
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/handlers/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/tasks/main.yml | 55
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini | 90
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf | 2
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot | 25
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini | 81
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini | 46
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini | 108
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf | 465
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf | 466
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh | 4
-rw-r--r-- compass/deploy/ansible/roles/neutron-compute/templates/nova.conf | 73
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/handlers/main.yml | 24
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/tasks/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml | 10
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml | 29
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini | 90
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf | 2
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot | 25
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini | 81
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini | 46
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini | 108
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf | 465
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf | 466
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh | 4
-rw-r--r-- compass/deploy/ansible/roles/neutron-controller/templates/nova.conf | 69
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/handlers/main.yml | 21
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml | 20
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/tasks/main.yml | 114
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/tasks/odl.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini | 90
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf | 2
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot | 25
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini | 81
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini | 46
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini | 108
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf | 465
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/neutron.conf | 466
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh | 4
-rw-r--r-- compass/deploy/ansible/roles/neutron-network/templates/nova.conf | 69
-rw-r--r-- compass/deploy/ansible/roles/nova-compute/handlers/main.yml | 3
-rw-r--r-- compass/deploy/ansible/roles/nova-compute/tasks/main.yml | 21
-rw-r--r-- compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf | 7
-rw-r--r-- compass/deploy/ansible/roles/nova-compute/templates/nova.conf | 73
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/handlers/main.yml | 24
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/tasks/main.yml | 13
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml | 16
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml | 35
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini | 90
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf | 2
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot | 25
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini | 81
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini | 46
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini | 108
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf | 465
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/neutron.conf | 466
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh | 4
-rw-r--r-- compass/deploy/ansible/roles/nova-controller/templates/nova.conf | 72
-rw-r--r-- compass/deploy/ansible/roles/repo/tasks/main.yml | 6
-rw-r--r-- compass/deploy/ansible/roles/repo/templates/sources.list | 1
-rw-r--r-- compass/deploy/compass_vm.sh | 103
-rw-r--r-- compass/deploy/conf/baremetal.conf | 20
-rw-r--r-- compass/deploy/conf/base.conf | 24
-rw-r--r-- compass/deploy/conf/cluster.conf | 20
-rw-r--r-- compass/deploy/conf/five.conf | 4
-rw-r--r-- compass/deploy/deploy-vm.sh | 9
-rw-r--r-- compass/deploy/deploy_host.sh | 40
-rwxr-xr-x [-rw-r--r--] compass/deploy/func.sh | 17
-rw-r--r-- compass/deploy/host_baremetal.sh | 9
-rw-r--r-- compass/deploy/host_vm.sh | 59
-rwxr-xr-x compass/deploy/network.sh | 70
-rw-r--r-- compass/deploy/prepare.sh | 66
-rw-r--r-- compass/deploy/remote_excute.exp | 23
-rw-r--r-- compass/deploy/status_callback.py | 174
-rw-r--r-- compass/deploy/template/network/bridge.xml | 5
-rw-r--r-- compass/deploy/template/network/nat.xml | 10
-rw-r--r-- compass/deploy/template/vm/compass.xml | 64
-rw-r--r-- compass/deploy/template/vm/host.xml | 67
-rw-r--r-- foreman/build/Makefile | 6
-rw-r--r-- foreman/build/cache.mk | 2
-rw-r--r-- foreman/build/opnfv-genesis.spec | 17
-rw-r--r-- foreman/ci/Vagrantfile | 13
-rwxr-xr-x foreman/ci/bootstrap.sh | 5
-rwxr-xr-x foreman/ci/clean.sh | 217
-rwxr-xr-x foreman/ci/deploy.sh | 1672
-rw-r--r-- foreman/ci/inventory/lf_pod2_ksgen_settings.yml | 36
-rw-r--r-- foreman/ci/opnfv_ksgen_settings.yml | 35
-rw-r--r-- foreman/ci/opnfv_ksgen_settings_no_HA.yml | 272
-rw-r--r-- foreman/ci/reload_playbook.yml | 1
-rwxr-xr-x foreman/ci/resize_lvm.sh | 37
-rwxr-xr-x foreman/ci/resize_partition.sh | 33
-rwxr-xr-x foreman/ci/vm_nodes_provision.sh | 59
-rw-r--r-- foreman/docs/src/installation-instructions.rst | 448
-rw-r--r-- foreman/docs/src/release-notes.rst | 161
-rw-r--r-- fuel/TODO | 5
-rw-r--r-- fuel/build/Makefile | 49
-rw-r--r-- fuel/build/README | 1
-rw-r--r-- fuel/build/cache.mk | 18
-rw-r--r-- fuel/build/config.mk | 9
-rw-r--r-- fuel/build/docker/ubuntu-builder/Dockerfile | 5
-rwxr-xr-x fuel/build/docker/ubuntu-builder/install.sh | 25
-rw-r--r-- fuel/build/f_isoroot/Makefile | 2
-rwxr-xr-x fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh | 107
-rwxr-xr-x fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig | 108
-rwxr-xr-x fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh | 9
-rwxr-xr-x fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh | 15
-rwxr-xr-x fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh | 19
-rw-r--r-- [-rwxr-xr-x] fuel/build/f_isoroot/f_kscfg/ks.cfg | 122
-rw-r--r-- fuel/build/f_isoroot/f_kscfg/ks.cfg.orig | 120
-rw-r--r-- fuel/build/f_isoroot/f_odlpluginbuild/Makefile | 38
-rw-r--r-- fuel/build/f_isoroot/f_predeployment/Makefile | 28
-rw-r--r-- fuel/build/f_isoroot/f_predeployment/README | 18
-rwxr-xr-x fuel/build/f_isoroot/f_predeployment/pre-deploy.sh | 401
-rwxr-xr-x fuel/build/f_isoroot/f_predeployment/sysinfo.sh | 12
-rwxr-xr-x fuel/build/f_isoroot/f_predeployment/transform_yaml.py | 68
-rw-r--r-- fuel/build/f_isoroot/f_repobuild/Makefile | 56
-rw-r--r-- fuel/build/f_l23network/Makefile | 28
-rw-r--r-- fuel/build/f_l23network/README | 35
-rw-r--r-- fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb | 21
-rw-r--r-- fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp | 18
-rw-r--r-- fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig | 16
-rw-r--r-- fuel/build/f_l23network/testing/README | 12
-rw-r--r-- fuel/build/f_l23network/testing/fake_init.pp | 13
-rw-r--r-- fuel/build/f_ntp/Makefile | 28
-rw-r--r-- fuel/build/f_ntp/README | 33
-rw-r--r-- fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp | 80
-rw-r--r-- fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb | 21
-rw-r--r-- fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb | 21
-rw-r--r-- fuel/build/f_ntp/testing/README | 12
-rwxr-xr-x fuel/build/f_odl_docker/Makefile | 51
-rwxr-xr-x fuel/build/f_odl_docker/dockerfile/Dockerfile | 72
-rwxr-xr-x fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh | 8
-rwxr-xr-x fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh | 17
-rwxr-xr-x fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh | 38
-rw-r--r-- fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp | 77
-rw-r--r-- fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh | 219
-rwxr-xr-x fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh | 192
-rwxr-xr-x fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh | 54
-rwxr-xr-x fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh | 95
-rw-r--r-- fuel/build/f_odl_docker/scripts/config_net_odl.sh | 164
-rw-r--r-- fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh | 146
-rwxr-xr-x fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh | 90
-rw-r--r-- fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh | 23
-rw-r--r-- fuel/build/f_opnfv_puppet/Makefile | 28
-rw-r--r-- fuel/build/f_opnfv_puppet/README | 12
-rw-r--r-- fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp | 9
-rw-r--r-- fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp | 28
-rw-r--r-- fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp | 21
-rw-r--r-- fuel/build/f_osnaily/Makefile | 28
-rw-r--r-- fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp | 366
-rw-r--r-- fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig | 353
-rw-r--r-- fuel/build/f_resolvconf/Makefile | 28
-rw-r--r-- fuel/build/f_resolvconf/README | 36
-rw-r--r-- fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp | 73
-rw-r--r-- fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb | 15
-rw-r--r-- fuel/build/f_resolvconf/testing/README | 13
-rw-r--r-- fuel/build/f_resolvconf/testing/fake_init.pp | 13
-rw-r--r-- fuel/build/fuel-agent_1.patch | 36
-rw-r--r-- fuel/build/fuel-main_1.patch | 104
-rw-r--r-- fuel/build/fuel-main_2.patch | 18
-rw-r--r-- fuel/build/fuel-main_3.patch | 19
-rw-r--r-- fuel/build/fuel-main_5.patch | 19
-rw-r--r-- fuel/build/install/apt-ftparchive-deb.conf | 6
-rw-r--r-- fuel/build/install/apt-ftparchive-release.conf | 8
-rw-r--r-- fuel/build/install/apt-ftparchive-udeb.conf | 4
-rwxr-xr-x fuel/build/install/install.sh | 84
-rwxr-xr-x fuel/build/install/uninstall.sh | 16
-rw-r--r-- fuel/build/opendaylight/Makefile | 102
-rw-r--r-- fuel/build/opendaylight/README | 52
-rw-r--r-- fuel/build/opendaylight/f_odl/Makefile | 49
-rw-r--r-- fuel/build/opendaylight/f_odl/README | 49
-rw-r--r-- fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp | 13
-rw-r--r-- fuel/build/opendaylight/f_odl/testing/README | 12
-rw-r--r-- fuel/build/opendaylight/f_odl/testing/fake_init.pp | 13
-rwxr-xr-x fuel/build/opendaylight/make-odl-deb.sh | 314
-rw-r--r-- fuel/build/opendaylight/odl_maven/settings.xml | 46
-rw-r--r-- fuel/build/patch-packages/Makefile | 2
-rw-r--r-- fuel/build/patch-packages/debootstrap/Makefile | 28
-rw-r--r-- fuel/build/patch-packages/debootstrap/debootstrap.patch | 12
-rw-r--r-- fuel/build/patch-packages/neutron-common/Makefile | 19
-rw-r--r-- fuel/build/patch-packages/neutron-common/quota.patch | 67
-rw-r--r-- fuel/build/patch-packages/novnc/Makefile | 22
-rwxr-xr-x fuel/build/patch-packages/novnc/fix-missing.sh | 9
-rw-r--r-- fuel/ci/README | 89
-rwxr-xr-x fuel/ci/build.sh | 358
-rwxr-xr-x fuel/ci/deploy.sh | 18
-rw-r--r-- fuel/deploy/README | 186
-rw-r--r-- fuel/deploy/README.txt | 71
-rw-r--r-- fuel/deploy/__init__.py | 8
-rw-r--r-- fuel/deploy/baremetal/dea.yaml | 982
-rw-r--r-- fuel/deploy/baremetal/dha.yaml | 53
-rw-r--r-- fuel/deploy/cloud/configure_environment.py | 41
-rw-r--r-- fuel/deploy/cloud/configure_network.py | 15
-rw-r--r-- fuel/deploy/cloud/configure_nodes.py | 40
-rw-r--r-- fuel/deploy/cloud/configure_settings.py | 12
-rw-r--r-- fuel/deploy/cloud/deploy.py | 213
-rw-r--r-- fuel/deploy/cloud/deployment.py | 60
-rw-r--r-- fuel/deploy/common.py | 77
-rw-r--r-- fuel/deploy/dea.py | 39
-rw-r--r-- fuel/deploy/deploy.py | 263
-rw-r--r-- fuel/deploy/deploy_env.py | 207
-rw-r--r-- fuel/deploy/dha.py | 21
-rw-r--r-- fuel/deploy/dha_adapters/__init__.py | 8
-rw-r--r-- fuel/deploy/dha_adapters/hardware_adapter.py | 20
-rw-r--r-- fuel/deploy/dha_adapters/hp_adapter.py | 17
-rw-r--r-- fuel/deploy/dha_adapters/ipmi_adapter.py | 59
-rw-r--r-- fuel/deploy/dha_adapters/libvirt_adapter.py | 23
-rw-r--r-- fuel/deploy/environments/__init__.py (renamed from fuel/build/f_ntp/testing/fake_init.pp) | 11
-rw-r--r-- fuel/deploy/environments/execution_environment.py | 78
-rw-r--r-- fuel/deploy/environments/libvirt_environment.py | 107
-rw-r--r-- fuel/deploy/environments/virtual_fuel.py | 70
-rw-r--r-- fuel/deploy/execution_environment.py | 46
-rw-r--r-- fuel/deploy/fuel_patch/ks.cfg.patch | 19
-rwxr-xr-x fuel/deploy/install-ubuntu-packages.sh | 18
-rw-r--r-- fuel/deploy/install_fuel_master.py | 137
-rw-r--r-- fuel/deploy/libvirt/dha.yaml | 80
-rw-r--r-- fuel/deploy/reap.py | 339
-rw-r--r-- fuel/deploy/setup_environment.py | 165
-rw-r--r-- fuel/deploy/setup_vfuel.py | 143
-rw-r--r-- fuel/deploy/ssh_client.py | 23
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml | 844
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml | 54
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml | 841
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml | 49
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dea.yaml (renamed from fuel/deploy/libvirt/dea.yaml) | 941
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml | 49
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml | 842
-rw-r--r-- fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml | 49
-rw-r--r-- fuel/deploy/templates/hardware_environment/vms/fuel.xml (renamed from fuel/deploy/baremetal/vm/vFuel) | 32
-rw-r--r-- fuel/deploy/templates/virtual_environment/conf/dea.yaml | 838
-rw-r--r-- fuel/deploy/templates/virtual_environment/conf/dha.yaml | 38
-rw-r--r-- fuel/deploy/templates/virtual_environment/networks/fuel1.xml (renamed from fuel/deploy/libvirt/networks/fuel1) | 0
-rw-r--r-- fuel/deploy/templates/virtual_environment/networks/fuel2.xml (renamed from fuel/deploy/libvirt/networks/fuel2) | 0
-rw-r--r-- fuel/deploy/templates/virtual_environment/networks/fuel3.xml (renamed from fuel/deploy/libvirt/networks/fuel3) | 0
-rw-r--r-- fuel/deploy/templates/virtual_environment/networks/fuel4.xml (renamed from fuel/deploy/libvirt/networks/fuel4) | 0
-rw-r--r-- fuel/deploy/templates/virtual_environment/vms/compute.xml (renamed from fuel/deploy/libvirt/vms/compute) | 4
-rw-r--r-- fuel/deploy/templates/virtual_environment/vms/controller.xml (renamed from fuel/deploy/libvirt/vms/controller) | 7
-rw-r--r-- fuel/deploy/templates/virtual_environment/vms/fuel.xml (renamed from fuel/deploy/libvirt/vms/fuel-master) | 2
-rw-r--r-- fuel/deploy/transplant_fuel_settings.py | 25
-rw-r--r-- fuel/docs/src/build-instructions.rst | 37
-rw-r--r-- fuel/docs/src/installation-instructions.rst | 361
-rw-r--r-- fuel/docs/src/release-notes.rst | 111
-rw-r--r-- fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml | 2
-rw-r--r-- fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml | 2
-rw-r--r-- fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4 | 1
-rw-r--r-- fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5 | 1
-rw-r--r-- fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1 | 1
-rw-r--r-- fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master | 1
-rw-r--r-- opensteak/ci/build.sh | 19
-rw-r--r-- opensteak/ci/deploy.sh | 28
-rw-r--r-- opensteak/config/common.yaml | 119
-rw-r--r-- opensteak/config/infra.yaml | 81
-rw-r--r-- opensteak/tools/README.rst | 52
-rw-r--r-- opensteak/tools/config.yaml | 78
-rw-r--r-- opensteak/tools/create_foreman.py | 236
-rw-r--r-- opensteak/tools/files_foreman/id_rsa | 27
-rw-r--r-- opensteak/tools/files_foreman/id_rsa.pub | 1
-rw-r--r-- opensteak/tools/opensteak/__init__.py | 18
-rw-r--r-- opensteak/tools/opensteak/argparser.py | 46
-rw-r--r-- opensteak/tools/opensteak/conf.py | 72
-rw-r--r-- opensteak/tools/opensteak/foreman.py | 60
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/__init__.py | 18
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/api.py | 197
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/architectures.py | 49
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/compute_resources.py | 62
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/domains.py | 44
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/freeip.py | 79
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/hostgroups.py | 103
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/hosts.py | 142
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/item.py | 135
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/itemHost.py | 141
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py | 50
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py | 61
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py | 62
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/objects.py | 136
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/operatingsystems.py | 66
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/puppetClasses.py | 46
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/smart_proxies.py | 36
-rw-r--r-- opensteak/tools/opensteak/foreman_objects/subnets.py | 67
-rw-r--r-- opensteak/tools/opensteak/printer.py | 141
-rw-r--r-- opensteak/tools/opensteak/templateparser.py | 34
-rw-r--r-- opensteak/tools/opensteak/virsh.py | 174
-rw-r--r-- opensteak/tools/templates_foreman/install.sh | 216
-rw-r--r-- opensteak/tools/templates_foreman/kvm-config | 65
-rw-r--r-- opensteak/tools/templates_foreman/meta-data | 12
-rw-r--r-- opensteak/tools/templates_foreman/user-data | 25
370 files changed, 24488 insertions(+), 8270 deletions(-)
diff --git a/common/ci/clean.sh b/common/ci/clean.sh
new file mode 100755
index 000000000..cc116e8d5
--- /dev/null
+++ b/common/ci/clean.sh
@@ -0,0 +1,326 @@
+#!/usr/bin/env bash
+
+#Common clean script to uninstall the provisioning server
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Removes Libvirt, KVM, Vagrant, VirtualBox
+#
+#Destroys Vagrant VMs running in $vm_dir/
+#Shuts down all nodes found in Khaleesi settings
+#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt)
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+pxe_bridge='pxebr'
+vm_dir=/var/opt/opnfv
+first_interface='enp6s0'
+second_interface='enp7s0'
+management_vid=300
+management_interface="${first_interface}.${management_vid}"
+##END VARS
+
+##FUNCTIONS
+display_usage() {
+ echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
+ echo -e "\nUsage:\n$0 [arguments] \n"
+ echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
+ echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. Example: -base_config /opt/myinventory.yml \n"
+}
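+# Example invocations (the settings path is illustrative):
+#   ./clean.sh -base_config /opt/myinventory.yml
+#   ./clean.sh -h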
+
+remove_interface_with_name_pattern() {
+ if [ -z $1 ]; then
+ echo "${red}Cannot remove interface. No interface name pattern specified!${reset}"
+ exit 1
+ fi
+ local interface_name_pattern=$1
+ echo "${blue} Looking for interface with name pattern: ${interface_name_pattern}${reset}"
+ interface=$(ip link show | grep -oP ${interface_name_pattern})
+ if [ ! -z "${interface}" ]; then
+ echo "${blue}Interface ${interface} detected! Removing...${reset}"
+ ip link del ${interface}
+ if ip link show | grep -oP ${interface_name_pattern}; then
+ echo "${red}Could not remove interface ${interface} ${reset}"
+ exit 1
+ else
+ echo "${blue}Interface ${interface} successfully removed${reset}"
+ fi
+ else
+ echo "${blue}Interface with name pattern ${interface_name_pattern} does not exist, nothing to remove${reset}"
+ fi
+}
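+# Usage sketch: the teardown below invokes this with patterns such as
+#   remove_interface_with_name_pattern "enp.+s.+\.0"
+# matching VLAN sub-interfaces like enp6s0.0 (interface names are lab-specific).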
+##END FUNCTIONS
+
+if [[ ( $1 == "--help") || $1 == "-h" ]]; then
+ display_usage
+ exit 0
+fi
+
+echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
+echo "Use -h to display help"
+sleep 2
+
+while [ "`echo $1 | cut -c1`" = "-" ]
+do
+ echo $1
+ case "$1" in
+ -base_config)
+ base_config=$2
+ shift 2
+ ;;
+ *)
+ display_usage
+ exit 1
+ ;;
+esac
+done
+
+if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Unable to install ipmitool!${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}Skipping ipmitool as it is already installed!${reset}"
+ fi
+
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
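+
+ # The three grep loops above assume ksgen settings entries of the form
+ # (values illustrative):
+ #   bmc_ip: 172.30.8.69
+ #   bmc_user: admin
+ #   bmc_pass: octopus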
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+ echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+ else
+ echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+ exit 1
+ fi
+ done
+else
+ echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}"
+fi
+###check to see if vbox is installed
+vboxpkg=`rpm -qa | grep VirtualBox`
+if [ $? -eq 0 ]; then
+ skip_vagrant=0
+else
+ skip_vagrant=1
+fi
+
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+ cd /tmp/bgs_vagrant
+ vagrant destroy -f
+ rm -rf /tmp/bgs_vagrant
+fi
+
+###destroy vagrant
+if [ $skip_vagrant -eq 0 ]; then
+ if [ -d $vm_dir ]; then
+ ##all vm directories
+ for vm in $( ls $vm_dir ); do
+ cd $vm_dir/$vm
+ if vagrant destroy -f; then
+ echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+ else
+ echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+ killall vagrant
+ echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+ if ps axf | grep vagrant; then
+ echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
+ fi
+ fi
+
+ ##Vagrant boxes appear as VboxHeadless processes
+ ##try to gracefully destroy the VBox VM if it still exists
+ if vboxmanage list runningvms | grep $vm; then
+ echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+ vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+ vboxmanage controlvm $vbox_id poweroff
+ if vboxmanage unregistervm --delete $vbox_id; then
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ else
+ echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ fi
+ done
+ else
+ echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
+ fi
+
+ echo "${blue}Checking for any remaining virtual box processes...${reset}"
+ ###kill virtualbox
+ if ps axf | grep virtualbox; then
+ echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+ killall virtualbox
+ fi
+
+ ###kill any leftover VMs (brute force)
+ if ps axf | grep VBoxHeadless; then
+ echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+ killall VBoxHeadless
+ fi
+
+ ###remove virtualbox
+ echo "${blue}Removing VirtualBox... ${reset}"
+ yum -y remove $vboxpkg
+
+else
+ echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
+fi
+
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
+
+###check to see if libvirt is installed
+echo "${blue}Checking if libvirt/KVM is installed"
+if rpm -qa | grep -iE 'libvirt|kvm'; then
+ echo "${blue}Libvirt/KVM is installed${reset}"
+ echo "${blue}Checking for any QEMU/KVM VMs...${reset}"
+ vm_count=0
+ while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1)
+ if [ $vm_count -gt 0 ]; then
+ echo "${blue}VMs Found: $vm_count${reset}"
+ vm_running=0
+ while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running)
+ echo "${blue}Powering off $vm_running VM(s)${reset}"
+ while read -r vm; do
+ if ! virsh destroy $vm; then
+ echo "${red}WARNING: Unable to power off VM ${vm}${reset}"
+ else
+ echo "${blue}VM $vm powered off!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}')
+ echo "${blue}Destroying libvirt VMs...${reset}"
+ while read -r vm; do
+ if ! virsh undefine --remove-all-storage $vm; then
+ echo "${red}ERROR: Unable to remove the VM ${vm}${reset}"
+ exit 1
+ else
+ echo "${blue}VM $vm removed!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}')
+ else
+ echo "${blue}No VMs found for removal"
+ fi
+ echo "${blue}Removing libvirt and kvm packages"
+ yum -y remove libvirt-*
+ yum -y remove *qemu*
+else
+ echo "${blue}libvirt/KVM is not installed${reset}"
+fi
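+### For reference, the `sed 1,2d | head -n -1` pipelines above strip virsh's
+### two-line header and trailing blank line from output shaped like:
+###  Id    Name      State
+### ----------------------------
+###  1     vm-name   running
+### leaving one line per VM.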
+
+###remove possible VMs (needed for 'rmmod kvm_intel')
+if [ -n "$(ps -ef | grep qemu-kvm | grep -v grep)" ]; then
+ echo "${blue}Removing existing VMs ${reset}"
+ killall -9 qemu-kvm
+fi
+
+###remove kernel modules
+echo "${blue}Removing kernel modules ${reset}"
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do
+ if ! rmmod $kernel_mod; then
+ if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
+ echo "${blue} $kernel_mod is not currently loaded! ${reset}"
+ else
+ echo "${red}Error trying to remove Kernel Module: $kernel_mod ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}Removed Kernel Module: $kernel_mod ${reset}"
+ fi
+done
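+### Note on ordering: the vbox* modules and kvm_intel must come out before kvm,
+### since kvm_intel holds a reference on the kvm module.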
+
+###remove PXE bridge
+echo "${blue}Checking whether PXE bridge ${pxe_bridge} exists${reset}"
+if ! brctl show ${pxe_bridge} 2>&1 | grep -i 'No such device'; then
+ echo "${blue}PXE bridge ${pxe_bridge} detected! Removing...${reset}"
+ link_state=$(ip link show ${pxe_bridge} | grep -oP 'state \K[^ ]+')
+ if [[ ${link_state} != 'DOWN' ]]; then
+ ip link set dev ${pxe_bridge} down
+ sleep 5
+ link_state=$(ip link show ${pxe_bridge} | grep -oP 'state \K[^ ]+')
+ if [[ ${link_state} != 'DOWN' ]]; then
+ echo "${red}Could not bring DOWN bridge ${pxe_bridge} link state is ${link_state}${reset}"
+ exit 1
+ fi
+ fi
+ brctl delbr ${pxe_bridge}
+ if ifconfig | grep ${pxe_bridge} || brctl show | grep ${pxe_bridge}; then
+ echo "${red}Error trying to remove ${pxe_bridge}${reset}"
+ exit 1
+ else
+ echo "${blue}PXE bridge ${pxe_bridge} removed${reset}"
+ fi
+else
+ echo "${blue}PXE bridge ${pxe_bridge} does not exist${reset}"
+fi
+
+###remove PXE interface (VLAN 0)
+echo "${blue}Checking whether PXE interface (VLAN 0) exists and remove it${reset}"
+remove_interface_with_name_pattern "enp.+s.+\.0"
+
+###remove Openstack Management interface (VLAN 300)
+echo "${blue}Checking whether Openstack Management interface (VLAN 300) exists and remove it${reset}"
+remove_interface_with_name_pattern "enp.+s.+\.${management_vid}"
+
+###bounce interfaces to restore default IP config
+echo "${blue}Bouncing interfaces to restore IP config${reset}"
+for interface in $first_interface $second_interface; do
+ echo "${blue}Bouncing interface: ${interface}${reset}"
+ ifdown $interface
+ sleep 5
+ ifup $interface
+ tries=5
+ counter=0
+ while [ $counter -lt $tries ]; do
+ if ip addr show $interface | grep -Eo "inet [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+"; then
+ temp_ip=$(ip addr show $interface | grep -Eo "inet [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" | awk '{print $2}')
+ echo "${blue}IP found on ${interface}. IP is ${temp_ip}${reset}"
+ break
+ else
+ ((counter++))
+ sleep 2
+ fi
+ done
+
+ if [ "$counter" -ge 5 ]; then
+ echo "${red}Error: Unable to get IP address on ${interface}${reset}"
+ exit 1
+ fi
+done
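For a quick sanity check after running clean.sh, a minimal sketch (assuming the default interface names from the VARS block, enp6s0 and VLAN 300; adjust for your lab):

    sudo ./clean.sh -base_config /opt/myinventory.yml
    ip link show pxebr 2>/dev/null || echo "pxebr removed"
    ip link show enp6s0.300 2>/dev/null || echo "management VLAN removed"
    ip addr show enp6s0 | grep "inet "   # default IP config restored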
diff --git a/common/ci/setup.sh b/common/ci/setup.sh
new file mode 100755
index 000000000..bb54147b3
--- /dev/null
+++ b/common/ci/setup.sh
@@ -0,0 +1,270 @@
+#!/usr/bin/env bash
+
+#Script that installs prerequisites
+#author: Szilard Cserey (szilard.cserey@ericsson.com)
+#
+#Installs qemu-kvm, libvirt and prepares networking for Fuel VM
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+private_interface='enp6s0'
+public_interface='enp8s0'
+pxe_bridge='pxebr'
+fuel_gw_ip='10.20.0.1/16'
+management_vid=300
+management_interface="${private_interface}.${management_vid}"
+##END VARS
+
+##FUNCTIONS
+###check whether qemu-kvm is installed, otherwise install it
+install_qemu_kvm() {
+ echo "${blue}Checking whether qemu-kvm is installed, otherwise install it${reset}"
+ if ! rpm -qa | grep -iE 'qemu-kvm'; then
+ echo "${blue}qemu-kvm is not installed, installing...${reset}"
+ yum -y install qemu-kvm
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
+
+###check whether libvirt is installed, otherwise install it
+install_libvirt() {
+ echo "${blue}Checking whether libvirt is installed, otherwise install it${reset}"
+ if ! rpm -qa | grep -iE 'libvirt'; then
+ echo "${blue}libvirt is not installed, installing...${reset}"
+ yum -y install libvirt
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
+
+###check whether kvm kernel module is loaded, otherwise load it
+load_kvm_kernel_mod() {
+ echo "${blue}Checking whether kvm kernel module is loaded, otherwise load it${reset}"
+ if ! lsmod | grep -iE 'kvm'; then
+ if [[ `lscpu | grep 'Vendor ID' | awk 'BEGIN { FS = ":" } ; {print $2}' | tr -d ' '` == 'GenuineIntel' ]]; then
+ echo "${blue}Intel processor identified, loading kernel module kvm-intel${reset}"
+ kernel_mod='kvm-intel'
+ modprobe ${kernel_mod}
+ fi
+ if [[ `lscpu | grep 'Vendor ID' | awk 'BEGIN { FS = ":" } ; {print $2}' | tr -d ' '` == 'AuthenticAMD' ]]; then
+ echo "${blue}AMD processor identified, loading kernel module kvm-amd${reset}"
+ kernel_mod='kvm-amd'
+ modprobe ${kernel_mod}
+ fi
+ if ! lsmod | grep -iE 'kvm'; then
+ echo "${red}Failed to load kernel module ${kernel_mod}!${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
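+# (The vendor test above keys off an lscpu line such as
+#  "Vendor ID:             GenuineIntel" or "Vendor ID: AuthenticAMD".)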
+
+###check whether libvirtd service is running otherwise start it
+start_libvirtd_service() {
+ echo "${blue}Checking whether libvirtd service is running otherwise start it${reset}"
+ if ! sudo systemctl status libvirtd | grep -iE 'active \(running\)'; then
+ echo "${blue}starting libvirtd service${reset}"
+ systemctl start libvirtd
+ if ! sudo systemctl status libvirtd | grep -iE 'active \(running\)'; then
+ echo "${red}Failed to start libvirtd service!${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
+
+
+#Check whether interface exists
+check_interface_exists() {
+ if [ -z $1 ]; then
+ echo "${red}Cannot check whether interface exists! No interface specified!${reset}"
+ exit 1
+ fi
+ local interface=$1
+ #Check whether interface exists
+ echo "${blue}Checking whether interface ${interface} exists${reset}"
+ if ! ip link show ${interface}; then
+ echo "${red}Interface ${interface} does not exists!${reset}"
+ exit 1
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
+
+#Check whether interface is UP
+check_interface_up() {
+ if [ -z $1 ]; then
+ echo "${red}Cannot check whether interface is UP! No interface specified!${reset}"
+ exit 1
+ fi
+ local interface=$1
+
+ #Check whether interface is UP
+ echo "${blue}Checking whether interface ${interface} is UP${reset}"
+ link_state=$(ip link show ${interface} | grep -oP 'state \K[^ ]+')
+ if [[ ${link_state} != 'UP' ]]; then
+ echo "${blue}${interface} state is ${link_state}. Bringing it UP!${reset}"
+ ip link set dev ${interface} up
+ sleep 5
+ link_state=$(ip link show ${interface} | grep -oP 'state \K[^ ]+')
+ if [[ ${link_state} == 'DOWN' ]]; then
+ echo "${red}Could not bring UP interface ${interface} link state is ${link_state}${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
+
+#Create VLAN interface
+create_vlan_interface() {
+ if [ -z $1 ]; then
+ echo "${red}Cannot create VLAN interface. No base interface specified!${reset}"
+ exit 1
+ fi
+ if [ -z $2 ]; then
+ echo "${red}Cannot create VLAN interface. No VLAN ID specified!${reset}"
+ exit 1
+ fi
+
+ local base_interface=$1
+ local vid=$2
+ local interface="${base_interface}.${vid}"
+
+ echo "${blue}Checking whether VLAN ${vid} interface ${interface} exists, otherwise create it${reset}"
+ if ! ip link show ${interface}; then
+ echo "${blue}Creating VLAN ${vid} interface ${interface}${reset}"
+ ip link add link ${base_interface} name ${interface} type vlan id ${vid}
+ else
+ echo "${green}OK!${reset}"
+ fi
+
+ #Check whether VLAN interface is UP
+ check_interface_up ${interface}
+}
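+# Example: with the default VARS, the management setup below effectively runs
+#   create_vlan_interface enp6s0 300
+# yielding the VLAN interface enp6s0.300.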
+
+###setup PXE Bridge
+setup_pxe_bridge() {
+ pxe_vid=0
+ pxe_interface="${private_interface}.${pxe_vid}"
+ #Check whether VLAN 0 (PXE) interface exists, otherwise create it
+ create_vlan_interface ${private_interface} ${pxe_vid}
+
+ #Check whether PXE bridge exists
+ echo "${blue}Checking whether PXE bridge ${pxe_bridge} exists${reset}"
+ if brctl show ${pxe_bridge} 2>&1 | grep 'No such device'; then
+ echo "${blue}Creating PXE bridge ${pxe_bridge}${reset}"
+ brctl addbr ${pxe_bridge}
+ else
+ echo "${green}OK!${reset}"
+ fi
+
+ #Add VLAN 0 (PXE) interface to PXE bridge
+ echo "${blue}Checking whether VLAN 0 (PXE) interface ${pxe_interface} is added to PXE bridge ${pxe_bridge} exists${reset}"
+ if ! brctl show ${pxe_bridge} 2>&1 | grep ${pxe_interface}; then
+ echo "${blue}Adding VLAN 0 (PXE) interface ${pxe_interface} to PXE bridge ${pxe_bridge}${reset}"
+ brctl addif ${pxe_bridge} ${pxe_interface}
+ if ! brctl show ${pxe_bridge} 2>&1 | grep ${pxe_interface}; then
+ echo "${red}Could not add VLAN 0 (PXE) interface ${pxe_interface} to PXE bridge ${pxe_bridge}${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+
+ #Check whether PXE bridge is UP
+ check_interface_up ${pxe_bridge}
+
+ #Add Fuel Gateway IP Address to PXE bridge
+ echo "${blue}Checking whether Fuel Gateway IP Address ${fuel_gw_ip} is assigned to PXE bridge ${pxe_bridge}${reset}"
+ if ! ip addr show ${pxe_bridge} | grep ${fuel_gw_ip}; then
+ echo "${blue}Adding Fuel Gateway IP Address ${fuel_gw_ip} to PXE bridge ${pxe_bridge}${reset}"
+ sudo ip addr add ${fuel_gw_ip} dev ${pxe_bridge}
+ if ! ip addr show ${pxe_bridge} | grep ${fuel_gw_ip}; then
+ echo "${red}Could not add Fuel Gateway IP Address ${fuel_gw_ip} to PXE bridge ${pxe_bridge}${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
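+# With the default VARS the end state can be verified via:
+#   brctl show pxebr      # lists enp6s0.0 as a bridge port
+#   ip addr show pxebr    # shows 10.20.0.1/16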
+
+###check whether access to public network is granted
+check_access_enabled_to_public_network() {
+ #Check whether IP forwarding is enabled
+ echo "${blue}Checking whether IP Forwarding is enabled ${reset}"
+ if ! sysctl net.ipv4.ip_forward | grep "net.ipv4.ip_forward = 1"; then
+ sysctl -w net.ipv4.ip_forward=1
+ if ! sysctl net.ipv4.ip_forward | grep "net.ipv4.ip_forward = 1"; then
+ echo "${red}IP Forwarding could not be enabled!${reset}"
+ exit 1
+ fi
+ else
+ echo "${green}OK!${reset}"
+ fi
+
+ echo "${blue}Checking whether access is granted to public network through interface ${public_interface}${reset}"
+ if ! sudo iptables -t nat -L POSTROUTING -v | grep "MASQUERADE.*${public_interface}.*anywhere.*anywhere"; then
+ echo "${blue}Enable access to public network through interface ${public_interface}${reset}"
+ iptables -t nat -A POSTROUTING -o ${public_interface} -j MASQUERADE
+ else
+ echo "${green}OK!${reset}"
+ fi
+}
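+# Note: the ip_forward sysctl and the MASQUERADE rule above are runtime-only;
+# this script does not persist them, so they vanish on reboot unless saved
+# (e.g. via /etc/sysctl.conf and iptables-save).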
+
+###setup Openstack Management Interface
+create_openstack_management_interface() {
+ #Check whether Openstack Management interface exists, otherwise create it
+ create_vlan_interface ${private_interface} ${management_vid}
+
+ echo "${blue}Moving IP addresses from interface ${private_interface} to VLAN ${management_vid} interface ${management_interface}${reset}"
+ private_interface_ip_addr_list=$(ip addr show ${private_interface} | grep -oP 'inet \K[^ ]+')
+ if [[ ! -z ${private_interface_ip_addr_list} ]]; then
+ echo -e "${blue}Found IP addresses on interface ${private_interface}:\n${private_interface_ip_addr_list}${reset}"
+ for private_interface_ip_addr in ${private_interface_ip_addr_list}
+ do
+ echo "${blue}Removing IP address ${private_interface_ip_addr} from interface ${private_interface}${reset}"
+ ip addr del ${private_interface_ip_addr} dev ${private_interface}
+ if ip addr show ${private_interface} | grep ${private_interface_ip_addr}; then
+ echo "${red}Could not remove IP address ${private_interface_ip_addr} from interface ${private_interface}${reset}"
+ exit 1
+ fi
+ if ! ip addr show ${management_interface} | grep ${private_interface_ip_addr}; then
+ echo "${blue}Adding IP address ${private_interface_ip_addr} to VLAN ${management_vid} interface ${management_interface}${reset}"
+ ip addr add ${private_interface_ip_addr} dev ${management_interface}
+ if ! ip addr show ${management_interface} | grep ${private_interface_ip_addr}; then
+ echo "${red}Could not set IP address ${private_interface_ip_addr} to VLAN ${management_vid} interface ${management_interface}${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}VLAN ${management_vid} interface ${management_interface} already has assigned to itself this IP address ${private_interface_ip_addr}${reset}"
+ fi
+ done
+ else
+ echo "${red}No IP Address is assigned to interface ${private_interface}, there isn't any IP address to move to interface ${management_interface}${reset}"
+ fi
+}
+
+##END FUNCTIONS
+
+main() {
+ install_qemu_kvm
+ install_libvirt
+ load_kvm_kernel_mod
+ start_libvirtd_service
+ check_interface_exists ${private_interface}
+ check_interface_up ${private_interface}
+ check_interface_exists ${public_interface}
+ check_interface_up ${public_interface}
+ setup_pxe_bridge
+ check_access_enabled_to_public_network
+ create_openstack_management_interface
+}
+
+main "$@"
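After setup.sh completes, a minimal verification sketch (again assuming the default enp6s0/enp8s0 interface names and the 10.20.0.1/16 Fuel gateway from the VARS block):

    sudo ./setup.sh
    ip addr show pxebr | grep 10.20.0.1   # Fuel gateway on the PXE bridge
    ip link show enp6s0.300               # management VLAN interface exists
    sysctl net.ipv4.ip_forward            # expect: net.ipv4.ip_forward = 1
    systemctl is-active libvirtd          # expect: active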
diff --git a/common/docs/user-guide.rst b/common/docs/user-guide.rst
index 8e0222493..08b27675f 100644
--- a/common/docs/user-guide.rst
+++ b/common/docs/user-guide.rst
@@ -21,14 +21,13 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2015-05-28 | 0.0.1 | Christopher Price | Initial version |
+| 2015-06-04 | 1.0.0 | Christopher Price | Initial revision |
| | | (Ericsson AB) | |
+--------------------+--------------------+--------------------+--------------------+
-| 2015-06-02 | 0.0.2 | Christopher Price | Minor Updates |
-| | | (Ericsson AB) | |
+| 2015-06-05 | 1.0.1 | Christopher Price | Corrected links & |
+| | | (Ericsson AB) | e-mail address |
+--------------------+--------------------+--------------------+--------------------+
-
.. contents:: Table of Contents
:backlinks: none
@@ -60,16 +59,16 @@ Hardware Requirements
The Arno release of OPNFV is intended to be run as a baremetal deployment on a "Pharos compliant" lab infrastructure. The Pharos project in OPNFV is a community activity to provide guidance and establish requirements on hardware platforms supporting the Arno virtualisation platform.
-Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: http://artifacts.opnfv.org/pharos/docs/spec.html
+Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: https://www.opnfv.org/sites/opnfv/files/release/pharos-spec.arno.2015.1.0.pdf
Arno Platform Deployment
------------------------
The Arno platform supports installation and deployment using two deployment tools; a Foreman based deployment toolchain and a Fuel based deployment toolchain.
-In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow in the Foreman installation guide: http://artifacts.opnfv.org/genesis/foreman/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow the Foreman installation guide: https://www.opnfv.org/sites/opnfv/files/release/foreman_install-guide.arno.2015.1.0.pdf
-In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow in the Fuel installation guide: http://artifacts.opnfv.org/genesis/fuel/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow the Fuel installation guide: https://www.opnfv.org/sites/opnfv/files/release/install-guide.arno.2015.1.0.pdf
Enabling or disabling OpenDaylight and the native Neutron driver
----------------------------------------------------------------
@@ -79,7 +78,7 @@ You may find that you wish to adjust the system by enabling or disabling the nat
Deployment Validation
---------------------
-Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: http://artifacts.opnfv.org/functest/docs/functest.html
+Once installed you should validate that the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: https://www.opnfv.org/sites/opnfv/files/release/functest.arno.2015.1.0.pdf
Operating the Arno platform
===========================
@@ -117,7 +116,7 @@ You can engage with the community to help us improve and further develop the OPN
- To access Jira for issue reporting or improvement proposals head to: https://jira.opnfv.org/
- To get started helping out developing the platform head to: https://wiki.opnfv.org/developer
-Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://support@opnfv.org
+Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://opnfv-users@lists.opnfv.org
License
=======
@@ -149,7 +148,7 @@ Fuel
`Fuel User Guide <http://docs.fuel-infra.org/openstack/fuel/fuel-6.0/user-guide.html>`_
:Authors: Christopher Price (christopher.price@ericsson.com)
-:Version: 0.0.2
+:Version: 1.0.1
**Documentation tracking**
diff --git a/common/puppet-opnfv/manifests/compute.pp b/common/puppet-opnfv/manifests/compute.pp
index 0b8175762..2fed2419f 100644
--- a/common/puppet-opnfv/manifests/compute.pp
+++ b/common/puppet-opnfv/manifests/compute.pp
@@ -51,11 +51,11 @@ class opnfv::compute {
if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password }
##HA Global params
- if $ha_flag {
+ if $ha_flag and str2bool($ha_flag) {
if $private_network == '' { fail('private_network is empty') }
if !$keystone_private_vip { fail('keystone_private_vip is empty') }
if !$glance_private_vip { fail('glance_private_vip is empty') }
- if !$nova_private_vip { fail('nova_private_vip is empty') }
+ if !$nova_public_vip { fail('nova_public_vip is empty') }
if !$nova_db_password { $nova_db_password = $single_password }
if !$nova_user_password { $nova_user_password = $single_password }
if !$controllers_ip_array { fail('controllers_ip_array is empty') }
@@ -78,19 +78,30 @@ class opnfv::compute {
} else {
##non HA params
- if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
- if !$private_ip { fail('private_ip is empty') }
- $keystone_private_vip = $private_ip
- $glance_private_vip = $private_ip
- $nova_private_vip = $private_ip
- $neutron_private_vip = $private_ip
- if !$nova_db_password { fail('nova_db_password is empty') }
- if !$nova_user_password { fail('nova_user_password is empty') }
- if !$odl_control_ip { $odl_control_ip = $private_ip }
- if !$mysql_ip { $mysql_ip = $private_ip }
- if !$amqp_ip { $amqp_ip = $private_ip }
- if !$amqp_username { $amqp_username = 'guest' }
- if !$amqp_password { $amqp_password = 'guest' }
+ ##Mandatory
+ if $private_network == '' { fail('private_network is empty') }
+ if ($odl_flag != '') and str2bool($odl_flag) {
+ if $odl_control_ip == '' { fail('odl_control_ip is empty') }
+ }
+ if $controller_ip == '' { fail('controller_ip is empty') }
+
+ ##Optional
+ ##Find private interface
+ $ovs_tunnel_if = get_nic_from_network("$private_network")
+ ##Find private ip
+ $private_ip = get_ip_from_nic("$ovs_tunnel_if")
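+ ##(get_nic_from_network and get_ip_from_nic are custom parser functions,
+ ## not stock Puppet: given a network in CIDR form they resolve the local
+ ## NIC attached to it and that NIC's IP address, respectively)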
+
+ $keystone_private_vip = $controller_ip
+ $glance_private_vip = $controller_ip
+ $nova_public_vip = $controller_ip
+ $neutron_private_vip = $controller_ip
+
+ if !$nova_db_password { $nova_db_password = $single_password }
+ if !$nova_user_password { $nova_user_password = $single_password }
+ if !$mysql_ip { $mysql_ip = $controller_ip }
+ if !$amqp_ip { $amqp_ip = $controller_ip }
+ if !$amqp_username { $amqp_username = $single_username }
+ if !$amqp_password { $amqp_password = $single_password }
if !$ceph_mon_host { $ceph_mon_host= ["$private_ip"] }
if !$ceph_mon_initial_members { $ceph_mon_initial_members = ["$::hostname"] }
}
@@ -103,7 +114,7 @@ class opnfv::compute {
libvirt_inject_password => 'false',
libvirt_inject_key => 'false',
libvirt_images_type => 'rbd',
- nova_host => $nova_private_vip,
+ nova_host => $nova_public_vip,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
private_network => '',
diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp
index 157bc8f24..60cae3494 100644
--- a/common/puppet-opnfv/manifests/controller_networker.pp
+++ b/common/puppet-opnfv/manifests/controller_networker.pp
@@ -302,6 +302,7 @@ class opnfv::controller_networker {
class { "quickstack::pacemaker::neutron":
agent_type => $this_agent,
enable_tunneling => 'true',
+ external_network_bridge => 'br-ex',
ml2_mechanism_drivers => $ml2_mech_drivers,
ml2_network_vlan_ranges => ["physnet1:10:50"],
odl_controller_ip => $odl_control_ip,
@@ -309,6 +310,18 @@ class opnfv::controller_networker {
ovs_tunnel_iface => $ovs_tunnel_if,
ovs_tunnel_types => ["vxlan"],
verbose => 'true',
+ neutron_conf_additional_params => { default_quota => 'default',
+ quota_network => '50',
+ quota_subnet => '50',
+ quota_port => 'default',
+ quota_security_group => '50',
+ quota_security_group_rule => 'default',
+ quota_vip => 'default',
+ quota_pool => 'default',
+ quota_router => '50',
+ quota_floatingip => '100',
+ network_auto_schedule => 'default',
+ },
}
if ($external_network_flag != '') and str2bool($external_network_flag) {
@@ -316,50 +329,47 @@ class opnfv::controller_networker {
}
} else {
- if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
- if $public_ip == '' { fail('public_ip is empty') }
- if $private_ip == '' { fail('private_ip is empty') }
-
- if $odl_control_ip == '' { $odl_control_ip = $private_ip }
-
- if $mysql_ip == '' { fail('mysql_ip is empty') }
- if $mysql_root_password == '' { fail('mysql_root_password is empty') }
- if $amqp_ip == '' { fail('amqp_ip is empty') }
-
- if $memcache_ip == '' { fail('memcache_ip is empty') }
- if $neutron_ip == '' { fail('neutron_ip is empty') }
-
- if $keystone_db_password == '' { fail('keystone_db_password is empty') }
-
- if $horizon_secret_key == '' { fail('horizon_secret_key is empty') }
-
- if $nova_user_password == '' { fail('nova_user_password is empty') }
- if $nova_db_password == '' { fail('nova_db_password is empty') }
-
- if $cinder_user_password == '' { fail('cinder_user_password is empty') }
- if $cinder_db_password == '' { fail('cinder_db_password is empty') }
-
- if $glance_user_password == '' { fail('glance_user_password is empty') }
- if $glance_db_password == '' { fail('glance_db_password is empty') }
-
- if $neutron_user_password == '' { fail('neutron_user_password is empty') }
- if $neutron_db_password == '' { fail('neutron_db_password is empty') }
- if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
-
- if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
- if $ceilometer_metering_secret == '' { fail('ceilometer_user_password is empty') }
-
- if $heat_user_password == '' { fail('heat_user_password is empty') }
- if $heat_db_password == '' { fail('heat_db_password is empty') }
- if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') }
-
- if $swift_user_password == '' { fail('swift_user_password is empty') }
- if $swift_shared_secret == '' { fail('swift_shared_secret is empty') }
- if $swift_admin_password == '' { fail('swift_admin_password is empty') }
+ ##Mandatory Non-HA parameters
+ if $private_network == '' { fail('private_network is empty') }
+ if $public_network == '' { fail('public_network is empty') }
+ ##Optional Non-HA parameters
if !$amqp_username { $amqp_username = $single_username }
if !$amqp_password { $amqp_password = $single_password }
+ if !$mysql_root_password { $mysql_root_password = $single_password }
+ if !$keystone_db_password { $keystone_db_password = $single_password }
+ if !$horizon_secret_key { $horizon_secret_key = $single_password }
+ if !$nova_db_password { $nova_db_password = $single_password }
+ if !$nova_user_password { $nova_user_password = $single_password }
+ if !$cinder_db_password { $cinder_db_password = $single_password }
+ if !$cinder_user_password { $cinder_user_password = $single_password }
+ if !$glance_db_password { $glance_db_password = $single_password }
+ if !$glance_user_password { $glance_user_password = $single_password }
+ if !$neutron_db_password { $neutron_db_password = $single_password }
+ if !$neutron_user_password { $neutron_user_password = $single_password }
+ if !$neutron_metadata_shared_secret { $neutron_metadata_shared_secret = $single_password }
+ if !$ceilometer_user_password { $ceilometer_user_password = $single_password }
+ if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password }
+ if !$heat_user_password { $heat_user_password = $single_password }
+ if !$heat_db_password { $heat_db_password = $single_password }
+ if !$heat_auth_encryption_key { $heat_auth_encryption_key = 'octopus1octopus1' }
+ if !$swift_user_password { $swift_user_password = $single_password }
+ if !$swift_shared_secret { $swift_shared_secret = $single_password }
+ if !$swift_admin_password { $swift_admin_password = $single_password }
+ ##Find private interface
+ $ovs_tunnel_if = get_nic_from_network("$private_network")
+ ##Find private ip
+ $private_ip = get_ip_from_nic("$ovs_tunnel_if")
+ #Find public NIC
+ $public_nic = get_nic_from_network("$public_network")
+ $public_ip = get_ip_from_nic("$public_nic")
+
+ if !$mysql_ip { $mysql_ip = $private_ip }
+ if !$amqp_ip { $amqp_ip = $private_ip }
+ if !$memcache_ip { $memcache_ip = $private_ip }
+ if !$neutron_ip { $neutron_ip = $private_ip }
+ if !$odl_control_ip { $odl_control_ip = $private_ip }
class { "quickstack::neutron::controller_networker":
admin_email => $admin_email,
@@ -414,6 +424,8 @@ class opnfv::controller_networker {
horizon_cert => $quickstack::params::horizon_cert,
horizon_key => $quickstack::params::horizon_key,
+ keystonerc => true,
+
ml2_mechanism_drivers => $ml2_mech_drivers,
#neutron => true,
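Note: the Non-HA branch above now defaults every unset service credential to $single_password instead of failing hard. A minimal bash sketch of the same fallback idiom, with illustrative variable names and values:

    #!/bin/bash
    # Default unset credentials to a shared single-node password,
    # mirroring the Puppet `if !$x { $x = $single_password }` pattern.
    single_password='octopus1'
    : "${nova_db_password:=$single_password}"
    : "${glance_db_password:=$single_password}"
    echo "nova_db_password=$nova_db_password"   # prints octopus1 if it was unset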
diff --git a/common/puppet-opnfv/manifests/external_net_presetup.pp b/common/puppet-opnfv/manifests/external_net_presetup.pp
index b7c7c5f07..f52b90389 100644
--- a/common/puppet-opnfv/manifests/external_net_presetup.pp
+++ b/common/puppet-opnfv/manifests/external_net_presetup.pp
@@ -24,13 +24,24 @@ class opnfv::external_net_presetup {
$controllers_hostnames_array_str = $controllers_hostnames_array
$controllers_hostnames_array = split($controllers_hostnames_array, ',')
+ if ($admin_network != '') and ($admin_network != 'false') {
+ $admin_nic = get_nic_from_network("$admin_network")
+ if $admin_nic == '' { fail('admin_nic was not found') }
+ #Disable default route on Admin network
+ file_line { 'disable-defroute-admin':
+ path => "/etc/sysconfig/network-scripts/ifcfg-$admin_nic",
+ line => 'DEFROUTE=no',
+ match => '^DEFROUTE',
+ }
+ }
+
#find public NIC
$public_nic = get_nic_from_network("$public_network")
$public_nic_ip = get_ip_from_nic("$public_nic")
$public_nic_netmask = get_netmask_from_nic("$public_nic")
if ($public_nic == '') or ($public_nic_ip == '') or ($public_nic == "br-ex") or ($public_nic == "br_ex") {
- notify {"Skipping augeas, public_nic ${public_nic}, public_nic_ip ${public_nic_ip}":}
+ notify {"Skipping augeas, public_nic ${public_nic}, public_nic_ip ${public_nic_ip}":}
exec {'ovs-vsctl -t 10 -- --may-exist add-br br-ex':
path => ["/usr/sbin/", "/usr/bin/"],
@@ -85,7 +96,7 @@ class opnfv::external_net_presetup {
owner => 'root',
group => 'root',
mode => '0644',
- content => template('trystack/br_ex.erb'),
+ content => template('opnfv/br_ex.erb'),
before => Class["quickstack::pacemaker::params"],
}
->
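Note: the file_line resource above pins DEFROUTE=no in the admin NIC's ifcfg file. A rough shell equivalent of that idempotent edit, assuming $admin_nic has already been resolved:

    #!/bin/bash
    admin_nic=eth0   # assumed; the manifest derives it via get_nic_from_network
    cfg=/etc/sysconfig/network-scripts/ifcfg-$admin_nic
    # Replace an existing DEFROUTE line, or append one if none exists.
    if grep -q '^DEFROUTE' "$cfg"; then
        sed -i 's/^DEFROUTE.*/DEFROUTE=no/' "$cfg"
    else
        echo 'DEFROUTE=no' >> "$cfg"
    fi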
diff --git a/common/puppet-opnfv/manifests/external_net_setup.pp b/common/puppet-opnfv/manifests/external_net_setup.pp
index af00f203e..fc014d424 100644
--- a/common/puppet-opnfv/manifests/external_net_setup.pp
+++ b/common/puppet-opnfv/manifests/external_net_setup.pp
@@ -60,7 +60,7 @@ class opnfv::external_net_setup {
provider_network_type => flat,
provider_physical_network => 'physnet1',
router_external => true,
- tenant_name => 'admin',
+ tenant_name => 'services',
}
->
neutron_subnet { 'provider_subnet':
@@ -70,8 +70,9 @@ class opnfv::external_net_setup {
gateway_ip => $public_gateway,
allocation_pools => [ "start=${public_allocation_start},end=${public_allocation_end}" ],
dns_nameservers => $public_dns,
+ enable_dhcp => false,
network_name => 'provider_network',
- tenant_name => 'admin',
+ tenant_name => 'services',
}
->
neutron_router { 'provider_router':
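Note: the neutron_network/neutron_subnet resources above could equally be created by hand. A sketch with the Juno-era neutron CLI, assuming admin credentials are sourced; all addresses are illustrative:

    #!/bin/bash
    # Flat external network owned by the 'services' tenant, DHCP off,
    # matching the provider_network/provider_subnet resources above.
    neutron net-create provider_network \
        --tenant-id "$(keystone tenant-list | awk '/ services / {print $2}')" \
        --provider:network_type flat \
        --provider:physical_network physnet1 \
        --router:external=True
    neutron subnet-create provider_network 172.30.9.0/24 \
        --name provider_subnet --gateway 172.30.9.1 --disable-dhcp \
        --allocation-pool start=172.30.9.100,end=172.30.9.200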
diff --git a/common/puppet-opnfv/manifests/init.pp b/common/puppet-opnfv/manifests/init.pp
index 7b68df57a..d26bd7a29 100644
--- a/common/puppet-opnfv/manifests/init.pp
+++ b/common/puppet-opnfv/manifests/init.pp
@@ -18,7 +18,6 @@ class opnfv {
include opnfv::resolver
include opnfv::ntp
include opnfv::add_packages
- include opnfv::odl_docker
include opnfv::opncheck
}
diff --git a/common/puppet-opnfv/manifests/odl_docker.pp b/common/puppet-opnfv/manifests/odl_docker.pp
deleted file mode 100644
index 6e70ba077..000000000
--- a/common/puppet-opnfv/manifests/odl_docker.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-class opnfv::odl_docker
-{
- case $::fuel_settings['role'] {
- /controller/: {
-
- file { "/opt":
- ensure => "directory",
- }
-
- file { "/opt/opnfv":
- ensure => "directory",
- owner => "root",
- group => "root",
- mode => 777,
- }
-
- file { "/opt/opnfv/odl":
- ensure => "directory",
- }
-
- file { "/opt/opnfv/odl/odl_docker_image.tar":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/docker-latest":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/docker-latest",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/start_odl_conatiner.sh":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh",
- mode => 750,
- }
- }
- }
-}
-
diff --git a/common/puppet-opnfv/manifests/templates/br_ex.erb b/common/puppet-opnfv/manifests/templates/br_ex.erb
new file mode 100644
index 000000000..6c0e7e7f0
--- /dev/null
+++ b/common/puppet-opnfv/manifests/templates/br_ex.erb
@@ -0,0 +1,10 @@
+DEVICE=br-ex
+DEVICETYPE=ovs
+IPADDR=<%= @public_nic_ip %>
+NETMASK=<%= @public_nic_netmask %>
+GATEWAY=<%= @public_gateway %>
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+PEERDNS=no
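Note: for reference, with illustrative values (IP 172.30.9.66, netmask 255.255.255.0, gateway 172.30.9.1) the template renders to the ifcfg file below; the sketch simply writes that rendering out:

    #!/bin/bash
    # Example of what br_ex.erb produces once the facts are filled in.
    cat > /tmp/ifcfg-br-ex <<'EOF'
    DEVICE=br-ex
    DEVICETYPE=ovs
    IPADDR=172.30.9.66
    NETMASK=255.255.255.0
    GATEWAY=172.30.9.1
    BOOTPROTO=static
    ONBOOT=yes
    TYPE=OVSBridge
    PROMISC=yes
    PEERDNS=no
    EOF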
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile b/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
deleted file mode 100644
index 80a92d8c5..000000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
+++ /dev/null
@@ -1,82 +0,0 @@
-####################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# DOCKERFILE TO CREATE ODL IN CONTAINER AND EXPOSE DLUX AND OVSDB TO ODL
-#
-#############################################################################
-
-
-#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff
-FROM ubuntu:12.04
-
-# Maintainer Info
-MAINTAINER Daniel Smith
-
-
-#Run apt-get update one start just to check for updates when building
-RUN echo "Updating APT"
-RUN apt-get update
-RUN echo "Adding wget"
-RUN apt-get install -y wget
-RUN apt-get install -y net-tools
-RUN apt-get install -y openjdk-7-jre
-RUN apt-get install -y openjdk-7-jdk
-RUN apt-get install -y openssh-server
-RUN apt-get install -y vim
-RUN apt-get install -y expect
-RUN apt-get install -y daemontools
-RUN mkdir -p /opt/odl_source
-RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
-
-
-
-#Now lets got and fetch the ODL distribution
-RUN echo "Fetching ODL"
-RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
-
-RUN echo "Untarring ODL inplace"
-RUN mkdir -p /opt/odl
-RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
-
-RUN echo "Installing DLUX and other features into ODL"
-#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
-COPY container_scripts/start_odl_docker_container.sh /etc/init.d/
-COPY container_scripts/speak.sh /etc/init.d/
-#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
-RUN chmod 777 /etc/init.d/start_odl_docker_container.sh
-RUN chmod 777 /etc/init.d/speak.sh
-
-
-
-# Expose the ports
-
-# PORTS FOR BASE SYSTEM AND DLUX
-EXPOSE 8101
-EXPOSE 6633
-EXPOSE 1099
-EXPOSE 43506
-EXPOSE 8181
-EXPOSE 8185
-EXPOSE 9000
-EXPOSE 39378
-EXPOSE 33714
-EXPOSE 44444
-EXPOSE 6653
-
-# PORTS FOR OVSDB AND ODL CONTROL
-EXPOSE 12001
-EXPOSE 6640
-EXPOSE 8080
-EXPOSE 7800
-EXPOSE 55130
-EXPOSE 52150
-EXPOSE 36826
-
-# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable
-CMD ["/etc/init.d/start_odl_docker_container.sh"]
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh
deleted file mode 100644
index 533942eb3..000000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-#!/usr/bin/expect
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:list | grep -i odl-restconf\r"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
-
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh
deleted file mode 100644
index 95bbaf4e6..000000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/expect
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY
-#################################################################################
-
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh
deleted file mode 100644
index 8ae05f7bc..000000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY
-#################################################################################
-# Start up script for calling karaf / ODL inside a docker container.
-#
-# This script will also call a couple expect scripts to load the feature set that we want
-
-
-#ENV
-export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
-
-#MAIN
-echo "Starting up the da Sheilds..."
-/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
-echo "Sleeping 5 bad hack"
-sleep 10
-echo "should see stuff listening now"
-netstat -na
-echo " should see proess running for karaf"
-ps -efa
-echo " Starting the packages we want"
-/etc/init.d/speak.sh
-echo "Printout the status - if its right, you should see 8181 appear now"
-netstat -na
-ps -efa
-
-
-
-## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute
-## Cheap - but effective
-while true;
-do
- echo "Checking status of ODL:"
- /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
- sleep 60
-done
-
-
diff --git a/compass/ci/build.sh b/compass/ci/build.sh
index 4e5b87ba4..2b7fd9af1 100755
--- a/compass/ci/build.sh
+++ b/compass/ci/build.sh
@@ -84,7 +84,7 @@ EOF
# BEGIN of variables to customize
#
BUILD_BASE=$(readlink -e ../build/)
-RESULT_DIR="${BUILD_BASE}/release"
+export RESULT_DIR="${BUILD_BASE}/release"
BUILD_SPEC="${BUILD_BASE}/config.mk"
CACHE_DIR="cache"
LOCAL_CACHE_ARCH_NAME="compass-cache"
@@ -112,11 +112,11 @@ DEBUG=0
INTEGRATION_TEST=0
FULL_INTEGRATION_TEST=0
INTERACTIVE=0
-BUILD_CACHE_URI=
+export BUILD_CACHE_URI=
BUILD_SPEC=
BUILD_DIR=
BUILD_LOG=
-BUILD_VERSION=
+export BUILD_VERSION=
MAKE_ARGS=
#
# END of script assigned variables
@@ -133,6 +133,8 @@ source ${INCLUDE_DIR}/build.sh.debug
############################################################################
# BEGIN of main
#
+build_prepare
+
while getopts "s:c:v:f:l:r:RtTh" OPTION
do
case $OPTION in
@@ -376,15 +378,15 @@ mkdir -p ${BUILD_DIR}
cp ${BUILD_BASE}/.versions ${BUILD_DIR}
cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
-if [ $POPULATE_CACHE -eq 1 ]; then
- if [ ! -z ${BUILD_CACHE_URI} ]; then
- echo "Building cache ..."
- tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
- echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
- ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
- rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
- fi
-fi
+#if [ $POPULATE_CACHE -eq 1 ]; then
+# if [ ! -z ${BUILD_CACHE_URI} ]; then
+# echo "Building cache ..."
+# tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+# echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+# ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+# rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+# fi
+#fi
echo "Success!!!"
exit 0
#
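Note: RESULT_DIR, BUILD_CACHE_URI and BUILD_VERSION are exported because build_prepare and the make targets run as child processes, and children only inherit exported variables. A two-line illustration:

    #!/bin/bash
    PLAIN=foo; export EXPORTED=bar
    bash -c 'echo "PLAIN=<$PLAIN> EXPORTED=<$EXPORTED>"'
    # prints: PLAIN=<> EXPORTED=<bar>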
diff --git a/compass/ci/deploy.sh b/compass/ci/deploy.sh
index fe754aac4..197bf63bf 100755
--- a/compass/ci/deploy.sh
+++ b/compass/ci/deploy.sh
@@ -1,5 +1,9 @@
-SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-CONF_NAME=$1
-source ${SCRIPT_DIR}/../deploy/prepare.sh || exit $?
-source ${SCRIPT_DIR}/../deploy/setup-env.sh || exit $?
-source ${SCRIPT_DIR}/../deploy/deploy-vm.sh || exit $?
+#set -x
+COMPASS_DIR=`cd ${BASH_SOURCE[0]%/*}/../;pwd`
+export COMPASS_DIR
+
+apt-get install -y screen
+screen -ls |grep deploy|awk -F. '{print $1}'|xargs -r kill -9
+screen -wipe
+#screen -dmSL deploy bash $COMPASS_DIR/ci/launch.sh $*
+$COMPASS_DIR/ci/launch.sh $*
diff --git a/compass/ci/launch.sh b/compass/ci/launch.sh
new file mode 100755
index 000000000..316b06f53
--- /dev/null
+++ b/compass/ci/launch.sh
@@ -0,0 +1,65 @@
+#set -x
+WORK_DIR=$COMPASS_DIR/ci/work
+
+if [[ $# -ge 1 ]];then
+ CONF_NAME=$1
+else
+ CONF_NAME=cluster
+fi
+
+source ${COMPASS_DIR}/ci/log.sh
+source ${COMPASS_DIR}/deploy/conf/${CONF_NAME}.conf
+source ${COMPASS_DIR}/deploy/prepare.sh
+source ${COMPASS_DIR}/deploy/network.sh
+
+if [[ ! -z $VIRT_NUMBER ]];then
+ source ${COMPASS_DIR}/deploy/host_vm.sh
+else
+ source ${COMPASS_DIR}/deploy/host_baremetal.sh
+fi
+
+source ${COMPASS_DIR}/deploy/compass_vm.sh
+source ${COMPASS_DIR}/deploy/deploy_host.sh
+
+######################### main process
+
+if ! prepare_env;then
+ echo "prepare_env failed"
+ exit 1
+fi
+
+log_info "########## get host mac begin #############"
+machines=`get_host_macs`
+if [[ -z $machines ]];then
+ log_error "get_host_macs failed"
+ exit 1
+fi
+
+log_info "deploy host macs: $machines"
+export machines
+
+log_info "########## set up network begin #############"
+if ! create_nets;then
+ log_error "create_nets failed"
+ exit 1
+fi
+
+if ! launch_compass;then
+ log_error "launch_compass failed"
+ exit 1
+fi
+if [[ ! -z $VIRT_NUMBER ]];then
+ if ! launch_host_vms;then
+ log_error "launch_host_vms failed"
+ exit 1
+ fi
+fi
+if ! deploy_host;then
+ #tear_down_machines
+ #tear_down_compass
+ exit 1
+else
+ #tear_down_machines
+ #tear_down_compass
+ exit 0
+fi
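Note: launch.sh is meant to be driven through deploy.sh, which sets COMPASS_DIR; a hypothetical invocation, where 'cluster' selects deploy/conf/cluster.conf:

    #!/bin/bash
    # Run as root: the wrapper installs packages and creates VMs/networks.
    cd compass/ci && ./deploy.sh cluster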
diff --git a/compass/ci/log.sh b/compass/ci/log.sh
new file mode 100755
index 000000000..f54fdca50
--- /dev/null
+++ b/compass/ci/log.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+reset=`tput sgr0`
+red=`tput setaf 1`
+green=`tput setaf 2`
+yellow=`tput setaf 3`
+
+function log_info() {
+ echo -e "${green}$*${reset}"
+}
+
+function log_warn() {
+ echo -e "${yellow}$*${reset}"
+}
+
+function log_error() {
+ echo -e "${red}$*${reset}"
+}
+
+function log_progress() {
+ echo -en "${yellow}$*\r${reset}"
+}
+
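Note: the helpers are meant to be sourced rather than executed; a minimal usage sketch (the path is illustrative):

    #!/bin/bash
    source compass/ci/log.sh
    log_info  "deployment started"    # green
    log_warn  "cache not found"       # yellow
    log_error "deploy_host failed"    # red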
diff --git a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml
new file mode 100644
index 000000000..9c1d7e7de
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml
@@ -0,0 +1,42 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+ - common
+
+- hosts: ha
+ remote_user: root
+ sudo: True
+ roles:
+ - ha
+
+- hosts: controller
+ remote_user: root
+ sudo: True
+ roles:
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - cinder-controller
+ - glance
+ - neutron-common
+ - neutron-network
+ - dashboard
+
+- hosts: compute
+ remote_user: root
+ sudo: True
+ roles:
+ - nova-compute
+ - neutron-compute
+ - cinder-volume
+
+- hosts: all
+ remote_user: root
+ sudo: True
+ roles:
+ - monitor
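Note: a playbook like this is applied against an inventory defining the ha, controller and compute groups; a hypothetical invocation (the inventory path is illustrative):

    #!/bin/bash
    ansible-playbook -i /opt/compass/inventory \
        HA-ansible-multinodes.yml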
diff --git a/compass/deploy/ansible/openstack_juno/allinone.yml b/compass/deploy/ansible/openstack_juno/allinone.yml
new file mode 100644
index 000000000..15220cac4
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/allinone.yml
@@ -0,0 +1,38 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: controller
+ sudo: True
+ roles:
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
+
+- hosts: network
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/compute.yml b/compass/deploy/ansible/openstack_juno/compute.yml
new file mode 100644
index 000000000..b2679c0e1
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/compute.yml
@@ -0,0 +1,9 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/controller.yml b/compass/deploy/ansible/openstack_juno/controller.yml
new file mode 100644
index 000000000..7f4a10ebb
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/controller.yml
@@ -0,0 +1,15 @@
+---
+- hosts: controller
+ remote_user: root
+ sudo: True
+ roles:
+ - repo
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
diff --git a/compass/deploy/ansible/openstack_juno/group_vars/all b/compass/deploy/ansible/openstack_juno/group_vars/all
new file mode 100644
index 000000000..5643fcd9c
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/group_vars/all
@@ -0,0 +1,54 @@
+controller_host: 10.1.0.11
+network_host: 10.1.0.12
+compute_host: 10.1.0.13
+storage_host: 10.1.0.14
+odl_controller: 10.1.0.15
+
+DEBUG: False
+VERBOSE: False
+NTP_SERVER_LOCAL: controller
+DB_HOST: "{{ controller_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+RABBIT_PASS: guest
+KEYSTONE_DBPASS: keystone_db_secret
+DEMO_PASS: demo_secret
+ADMIN_PASS: admin_secret
+GLANCE_DBPASS: glance_db_secret
+GLANCE_PASS: glance_secret
+NOVA_DBPASS: nova_db_secret
+NOVA_PASS: nova_secret
+DASH_DBPASS: dash_db_secret
+CINDER_DBPASS: cinder_db_secret
+CINDER_PASS: cinder_secret
+NEUTRON_DBPASS: neutron_db_secret
+NEUTRON_PASS: neutron_secret
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+INTERFACE_NAME: eth2
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: ansible_eth1
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
diff --git a/compass/deploy/ansible/openstack_juno/multinodes.yml b/compass/deploy/ansible/openstack_juno/multinodes.yml
new file mode 100644
index 000000000..ffd29d576
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/multinodes.yml
@@ -0,0 +1,75 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: database
+ sudo: True
+ roles:
+ - common
+ - database
+
+- hosts: messaging
+ sudo: True
+ roles:
+ - common
+ - mq
+
+- hosts: identity
+ sudo: True
+ roles:
+ - common
+ - keystone
+
+- hosts: compute-controller
+ sudo: True
+ roles:
+ - common
+ - nova-controller
+
+- hosts: network-server
+ sudo: True
+ roles:
+ - common
+ - neutron-controller
+
+- hosts: storage-controller
+ sudo: True
+ roles:
+ - common
+ - cinder-controller
+
+- hosts: image
+ sudo: True
+ roles:
+ - common
+ - glance
+
+- hosts: dashboard
+ sudo: True
+ roles:
+ - common
+ - dashboard
+
+- hosts: network-worker
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage-volume
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute-worker
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
+
+
diff --git a/compass/deploy/ansible/openstack_juno/network.yml b/compass/deploy/ansible/openstack_juno/network.yml
new file mode 100644
index 000000000..558f3175d
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/network.yml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - neutron-network
diff --git a/compass/deploy/ansible/openstack_juno/single-controller.yml b/compass/deploy/ansible/openstack_juno/single-controller.yml
new file mode 100644
index 000000000..15220cac4
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/single-controller.yml
@@ -0,0 +1,38 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: controller
+ sudo: True
+ roles:
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
+
+- hosts: network
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/storage.yml b/compass/deploy/ansible/openstack_juno/storage.yml
new file mode 100644
index 000000000..3c0aa410f
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/storage.yml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - cinder-volume
diff --git a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
new file mode 100644
index 000000000..ef671dd04
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: restart cinder-scheduler
+ service: name=cinder-scheduler state=restarted enabled=yes
+- name: restart cinder-api
+ service: name=cinder-api state=restarted enabled=yes
+
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
new file mode 100644
index 000000000..7796cf775
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -0,0 +1,20 @@
+---
+- name: sync cinder db
+ shell: su -s /bin/sh -c "cinder-manage db sync" cinder
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart cinder-scheduler
+ - restart cinder-api
+
+- meta: flush_handlers
+
+- name: upload cinder keystone register script
+ template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
+
+- name: run cinder register script
+ shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done
+ args:
+ creates: cinder_init_complete
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
new file mode 100644
index 000000000..03ad4322b
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
@@ -0,0 +1,20 @@
+---
+- name: install cinder packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - cinder-api
+ - cinder-scheduler
+ - python-cinderclient
+
+- name: generate cinder service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - cinder-api
+ - cinder-scheduler
+
+- name: upload cinder conf
+ template: src=cinder.conf dest=/etc/cinder/cinder.conf
+ notify:
+ - restart cinder-scheduler
+ - restart cinder-api
+
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
new file mode 100644
index 000000000..1dbe91f2a
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: cinder_install.yml
+ tags:
+ - install
+ - cinder-install
+ - cinder
+
+- include: cinder_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - cinder-config
+ - cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
new file mode 100644
index 000000000..b568a1792
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
@@ -0,0 +1,71 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+# auth_host = 127.0.0.1
+# auth_port = 35357
+# auth_protocol = http
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
new file mode 100644
index 000000000..e34fd2fa0
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
@@ -0,0 +1,63 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
new file mode 100644
index 000000000..0ec61b647
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
@@ -0,0 +1,6 @@
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s
+
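Note: once cinder_init.sh has run, the registration can be sanity-checked with the same token-based keystone CLI; a sketch assuming ADMIN_TOKEN=admin and an illustrative VIP of 10.1.0.50:

    #!/bin/bash
    # Confirm the volume service and its 8776 endpoints were created.
    keystone --os-token=admin --os-endpoint=http://10.1.0.50:35357/v2.0 service-list  | grep volume
    keystone --os-token=admin --os-endpoint=http://10.1.0.50:35357/v2.0 endpoint-list | grep 8776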
diff --git a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
new file mode 100644
index 000000000..e872652a0
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
@@ -0,0 +1 @@
+physical_device: /dev/loop0
diff --git a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
new file mode 100644
index 000000000..ad917ce5c
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart cinder-volume
+ service: name=cinder-volume state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
new file mode 100644
index 000000000..8c0e626bb
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+- name: install cinder-volume and lvm2 packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - cinder-volume
+ - lvm2
+
+- name: generate cinder volume service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - cinder-volume
+
+- name: check if physical device exists
+ stat: path={{ physical_device }}
+ register: st
+
+- name: replace physical_device if st is false
+ local_action: copy src=loop.yml dest=/tmp/loop.yml
+ when: st.stat.exists == False
+
+- name: load loop.yml
+ include_vars: /tmp/loop.yml
+ when: st.stat.exists == False
+
+- name: check if cinder-volumes is mounted
+ shell: ls /mnt
+ register: cindervolumes
+
+- name: get available partition size
+ shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
+ register: partition_size
+
+- name: create sparse cinder-volumes backing file if not present
+ shell: dd if=/dev/zero of=/mnt/cinder-volumes
+ bs=1 count=0 seek={{ partition_size.stdout }}
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: get first lo device
+ shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
+ register: first_lo
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: do a losetup on /mnt/cinder-volumes
+ shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ physical_device }}
+ vg_options=--force
+
+- name: upload cinder-volume configuration
+ template: src=cinder.conf dest=/etc/cinder/cinder.conf
+ backup=yes
+ notify:
+ - restart cinder-volume
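Note: the loopback fallback in these tasks boils down to the manual sequence below; a sketch with an assumed 10 GiB sparse file. Using 'losetup -f' picks the first free loop device, which is safer than the task's 'ls /dev/loop* | sed -n 1p', since that can return a device already in use:

    #!/bin/bash
    # Sparse backing file -> loop device -> LVM volume group.
    dd if=/dev/zero of=/mnt/cinder-volumes bs=1 count=0 seek=10G
    lo=$(losetup -f)                  # first unused loop device
    losetup "$lo" /mnt/cinder-volumes
    pvcreate "$lo"
    vgcreate cinder-volumes "$lo"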
diff --git a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
new file mode 100644
index 000000000..aa3b8ccd0
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
@@ -0,0 +1,62 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = True
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
new file mode 100644
index 000000000..920f3d237
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
@@ -0,0 +1 @@
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
diff --git a/compass/deploy/ansible/roles/common/tasks/main.yml b/compass/deploy/ansible/roles/common/tasks/main.yml
new file mode 100644
index 000000000..ce595f51b
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: install ubuntu-cloud-keyring(ubuntu)
+ apt: name={{ item }} state=latest
+ with_items:
+ - ubuntu-cloud-keyring
+
+- name: update hosts files to all hosts
+ template: src=hosts
+ dest=/etc/hosts
+ backup=yes
+
+- name: install common packages
+ apt: name={{ item }} state=latest
+ with_items:
+ - python-pip
+ - python-dev
+ - python-mysqldb
+ - ntp
+
+- name: restart ntp
+ command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc"
+ ignore_errors: True
+
+- name: update ntp conf
+ template: src=ntp.conf dest=/etc/ntp.conf backup=yes
+
+- name: restart ntp
+ service: name=ntp state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/common/templates/hosts b/compass/deploy/ansible/roles/common/templates/hosts
new file mode 100644
index 000000000..9d27c0a9b
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/templates/hosts
@@ -0,0 +1,22 @@
+# compute-controller
+10.145.89.136 host-136
+# database
+10.145.89.136 host-136
+# messaging
+10.145.89.136 host-136
+# storage-controller
+10.145.89.138 host-138
+# image
+10.145.89.138 host-138
+# identity
+10.145.89.136 host-136
+# network-server
+10.145.89.138 host-138
+# dashboard
+10.145.89.136 host-136
+# storage-volume
+10.145.89.139 host-139
+# network-worker
+10.145.89.139 host-139
+# compute-worker
+10.145.89.137 host-137
diff --git a/compass/deploy/ansible/roles/common/templates/ntp.conf b/compass/deploy/ansible/roles/common/templates/ntp.conf
new file mode 100644
index 000000000..c61380924
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/templates/ntp.conf
@@ -0,0 +1,56 @@
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+# Specify one or more NTP servers.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+server {{ NTP_SERVER_LOCAL }}
+server 0.ubuntu.pool.ntp.org
+server 1.ubuntu.pool.ntp.org
+server 2.ubuntu.pool.ntp.org
+server 3.ubuntu.pool.ntp.org
+
+# Use Ubuntu's ntp server as a fallback.
+server ntp.ubuntu.com
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines. Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
diff --git a/compass/deploy/ansible/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/roles/dashboard/tasks/main.yml
new file mode 100644
index 000000000..465b9969b
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: install dashboard packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - apache2
+ - memcached
+ - libapache2-mod-wsgi
+ - openstack-dashboard
+
+- name: remove ubuntu theme
+ apt: name=openstack-dashboard-ubuntu-theme
+ state=absent
+
+## horizon configuration is already enabled in apache2/conf-enabled
+## by openstack-dashboard package deploy script.
+#- name: update dashboard conf
+# template: src=openstack-dashboard.conf
+# dest=/etc/apache2/sites-available/openstack-dashboard.conf
+# backup=yes
+
+- name: update horizon settings
+ template: src=local_settings.py
+ dest=/etc/openstack-dashboard/local_settings.py
+ backup=yes
+
+- name: restart apache2
+ service: name=apache2 state=restarted enabled=yes
+
+- name: restart memcached
+ service: name=memcached state=restarted enabled=yes
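Note: with apache2 and memcached restarted, the dashboard should answer on the /horizon root configured by the Ubuntu package; a quick check from the controller:

    #!/bin/bash
    # Expect HTTP 200, or a 302 redirect to the login page.
    curl -sI http://localhost/horizon/ | head -n 1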
diff --git a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
new file mode 100644
index 000000000..87e06e325
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
@@ -0,0 +1,511 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, The identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+# "identity": 3,
+# "volume": 2
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+ 'dashboards': ('project', 'admin', 'settings',),
+ 'default_dashboard': 'project',
+ 'user_home': 'openstack_dashboard.views.get_user_home',
+ 'ajax_queue_limit': 10,
+ 'auto_fade_alerts': {
+ 'delay': 3000,
+ 'fade_duration': 1500,
+ 'types': ['alert-success', 'alert-info']
+ },
+ 'help_url': "http://docs.openstack.org",
+ 'exceptions': {'recoverable': exceptions.RECOVERABLE,
+ 'not_found': exceptions.NOT_FOUND,
+ 'unauthorized': exceptions.UNAUTHORIZED},
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+# "regex": '.*',
+# "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for the login form if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
+# may be situations where you would want to set this explicitly, e.g. when
+# multiple dashboard instances are distributed on different machines (usually
+# behind a load-balancer). Either you have to make sure that a session gets all
+# requests routed to the same dashboard instance or you set the same SECRET_KEY
+# for all of them.
+from horizon.utils import secret_key
+SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to login again. To use
+# memcached set CACHES to something like
+CACHES = {
+ 'default': {
+ 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION' : '127.0.0.1:11211',
+ }
+}
+
+#CACHES = {
+# 'default': {
+# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache'
+# }
+#}
+
+# Enable the Ubuntu theme if it is present.
+try:
+ from ubuntu_theme import *
+except ImportError:
+ pass
+
+# Default Ubuntu apache configuration uses /horizon as the application root.
+# Configure auth redirects here accordingly.
+LOGIN_URL='/horizon/auth/login/'
+LOGOUT_URL='/horizon/auth/logout/'
+LOGIN_REDIRECT_URL='/horizon'
+
+# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
+# offline compression by default. To enable online compression, install
+# the node-less package and enable the following option.
+COMPRESS_OFFLINE = True
+
+# By default, validation of the HTTP Host header is disabled. Production
+# installations should have this set accordingly. For more information
+# see https://docs.djangoproject.com/en/dev/ref/settings/.
+ALLOWED_HOSTS = ['{{ dashboard_host }}']
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "{{ HA_VIP }}"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True
+}
+
+#Setting this to True, will add a new "Retrieve Password" action on instance,
+#allowing Admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_quotas': True,
+ 'enable_vpn': False,
+ # The profile_support option is used to detect if an external router can be
+ # configured via the dashboard. When using specific plugins the
+ # profile_support can be turned on if needed.
+ 'profile_support': None,
+ #'profile_support': 'cisco',
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+# 'image_formats': [
+# ('', ''),
+# ('aki', _('AKI - Amazon Kernel Image')),
+# ('ami', _('AMI - Amazon Machine Image')),
+# ('ari', _('ARI - Amazon Ramdisk Image')),
+# ('iso', _('ISO - Optical Disk Image')),
+# ('qcow2', _('QCOW2 - QEMU Emulator')),
+# ('raw', _('Raw')),
+# ('vdi', _('VDI')),
+# ('vhd', _('VHD')),
+# ('vmdk', _('VMDK'))
+# ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type")
+}
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None. This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+# 'key': 'name',
+# # or
+# 'key': my_awesome_callback_method,
+# 'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+# 'identity': 'keystone_policy.json',
+# 'compute': 'nova_policy.json',
+# 'volume': 'cinder_policy.json',
+# 'image': 'glance_policy.json',
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
+LOGGING = {
+ 'version': 1,
+ # When set to True this will disable all logging except
+ # for loggers specified in this configuration dictionary. Note that
+ # if nothing is specified here and disable_existing_loggers is True,
+ # django.db.backends will still log unless it is disabled explicitly.
+ 'disable_existing_loggers': False,
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'django.utils.log.NullHandler',
+ },
+ 'console': {
+ # Set the level to "DEBUG" for verbose output logging.
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ },
+ 'loggers': {
+ # Logging from django.db.backends is VERY verbose, send to null
+ # by default.
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'troveclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ }
+}
+
+# 'direction' should not be specified for all_tcp/udp/icmp.
+# It is specified in the form.
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': 'ALL TCP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': 'ALL UDP',
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': 'ALL ICMP',
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
+FLAVOR_EXTRA_KEYS = {
+ 'flavor_keys': [
+ ('quota:read_bytes_sec', _('Quota: Read bytes')),
+ ('quota:write_bytes_sec', _('Quota: Write bytes')),
+ ('quota:cpu_quota', _('Quota: CPU')),
+ ('quota:cpu_period', _('Quota: CPU period')),
+ ('quota:inbound_average', _('Quota: Inbound average')),
+ ('quota:outbound_average', _('Quota: Outbound average')),
+ ]
+}
+
diff --git a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
new file mode 100644
index 000000000..a5a791a36
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
@@ -0,0 +1,14 @@
+<VirtualHost *:80>
+
+WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
+Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
+
+<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
+Order allow,deny
+Allow from all
+</Directory>
+
+
+</VirtualHost>
+
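A quick sanity check of this vhost, once Apache has reloaded it, is to request the dashboard root over HTTP. A minimal sketch, assuming the dashboard is served on the controller's port 80:

    # Expect "HTTP/1.1 200 OK" (or a 302 redirect to the login page)
    curl -sI http://localhost/ | head -n 1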
diff --git a/compass/deploy/ansible/roles/database/files/my.cnf b/compass/deploy/ansible/roles/database/files/my.cnf
new file mode 100644
index 000000000..d61f94746
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/files/my.cnf
@@ -0,0 +1,131 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user = mysql
+pid-file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+lc-messages-dir = /usr/share/mysql
+skip-external-locking
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+bind-address = 0.0.0.0
+#
+# * Fine Tuning
+#
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+#max_connections = 100
+#table_cache = 64
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 1M
+query_cache_size = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cron job.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+#general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+# other settings you may need to change.
+#server-id = 1
+#log_bin = /var/log/mysql/mysql-bin.log
+expire_logs_days = 10
+max_binlog_size = 100M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 16M
+
+[mysql]
+#no-auto-rehash # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+# The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
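Because of the !includedir directive above, site-local overrides belong in /etc/mysql/conf.d/ rather than in this file. A minimal sketch of such an override (the file name tuning.cnf and the value are illustrative):

    # Files ending in .cnf under /etc/mysql/conf.d/ are read after my.cnf,
    # so their settings win on conflict.
    cat > /etc/mysql/conf.d/tuning.cnf <<'EOF'
    [mysqld]
    max_connections = 500
    EOF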
diff --git a/compass/deploy/ansible/roles/database/tasks/main.yml b/compass/deploy/ansible/roles/database/tasks/main.yml
new file mode 100644
index 000000000..e66f0cd2b
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: copy data.sh
+ template: src=data.j2 dest=/opt/data.sh mode=777
+ tags:
+ - mysql_user
+
+- include: mysql.yml
+ when: HA_CLUSTER is not defined
+
+- include: mariadb.yml
+ when: HA_CLUSTER is defined
+
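The two includes are mutually exclusive: mysql.yml runs when HA_CLUSTER is undefined, mariadb.yml when it is defined. As a hedged sketch, the variable can also be supplied at invocation time to flip the branch (inventory path and host/address are illustrative; in this repo it would normally come from group variables):

    # HA_CLUSTER defined -> mariadb.yml; omit -e to fall back to mysql.yml
    ansible-playbook -i inventory/hosts site.yml \
        -e '{"HA_CLUSTER": {"controller1": "10.1.0.50"}}'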
diff --git a/compass/deploy/ansible/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
new file mode 100644
index 000000000..093dfd11b
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
@@ -0,0 +1,46 @@
+---
+- name: install mariadb/galera cluster packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - libaio1
+ - libssl0.9.8
+ #- mariadb-client-5.5
+ - mysql-client-5.5
+ - python-mysqldb
+ - mysql-server-wsrep
+ - galera
+
+- name: create mysql log directory
+ file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mariadb my.cnf
+ template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes
+
+- name: update galera wsrep.cnf
+ template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes
+
+- name: update wsrep_sst_rsync uid
+ lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes
+
+- name: update wsrep_sst_rsync gid
+ lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$" backup=yes
+
+- name: manually restart mysql server
+ service: name=mysql state=restarted enabled=yes
+ register: result
+ until: result|success
+ retries: 5
+ delay: 5
+ tags:
+ - mysql_restart
+
+- name: generate mysql service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - mysql
+
+- name: create database/user
+ shell: /opt/data.sh
+ when: HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - mysql_user
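Once the restart task has succeeded on every node, cluster membership can be verified from any member. A minimal sketch, assuming root can log in over the local socket:

    # A healthy N-node cluster reports wsrep_cluster_size = N and a
    # local state of Synced.
    mysql -uroot -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
    mysql -uroot -e "SHOW STATUS LIKE 'wsrep_local_state_comment';"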
diff --git a/compass/deploy/ansible/roles/database/tasks/mysql.yml b/compass/deploy/ansible/roles/database/tasks/mysql.yml
new file mode 100644
index 000000000..327b6566a
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/mysql.yml
@@ -0,0 +1,22 @@
+---
+- name: install mysql client and server packages
+ apt: name={{ item }} state=present
+ with_items:
+ - python-mysqldb
+ - mysql-server
+
+- name: create mysql log directory
+ file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mysql my.cnf
+ copy: src=my.cnf
+ dest=/etc/mysql/my.cnf
+ backup=yes
+
+- name: manually restart mysql server
+ shell: service mysql restart
+
+- name: create database/user
+ shell: /opt/data.sh
+ tags:
+ - mysql_user
diff --git a/compass/deploy/ansible/roles/database/templates/data.j2 b/compass/deploy/ansible/roles/database/templates/data.j2
new file mode 100644
index 000000000..c894b322e
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/data.j2
@@ -0,0 +1,39 @@
+#!/bin/sh
+mysql -uroot -Dmysql <<EOF
+drop database if exists keystone;
+drop database if exists glance;
+drop database if exists neutron;
+drop database if exists nova;
+drop database if exists cinder;
+
+CREATE DATABASE keystone;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE glance;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE neutron;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE nova;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE cinder;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
+{% endfor %}
+
+{% if WSREP_SST_USER is defined %}
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
+{% endfor %}
+{% endif %}
+EOF
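For each service the template renders to plain SQL piped into mysql. A hedged sketch of just the keystone block as it would render on a host named controller1 with KEYSTONE_DBPASS set to secret (both values illustrative):

    mysql -uroot -Dmysql <<EOF
    CREATE DATABASE keystone;
    GRANT ALL ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'secret';
    GRANT ALL ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'secret';
    GRANT ALL ON keystone.* TO 'keystone'@'controller1' IDENTIFIED BY 'secret';
    EOF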
diff --git a/compass/deploy/ansible/roles/database/templates/my.cnf b/compass/deploy/ansible/roles/database/templates/my.cnf
new file mode 100644
index 000000000..165d619ca
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/my.cnf
@@ -0,0 +1,134 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user = mysql
+pid-file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+lc-messages-dir = /usr/share/mysql
+skip-external-locking
+skip-name-resolve
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+#bind-address = {{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address = {{ HA_VIP }}
+#
+# * Fine Tuning
+#
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+max_connections = 2000
+max_connect_errors = 8000
+#table_cache = 64
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 1M
+query_cache_size = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cron job.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+# other settings you may need to change.
+#server-id = 1
+#log_bin = /var/log/mysql/mysql-bin.log
+expire_logs_days = 10
+max_binlog_size = 100M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 16M
+
+[mysql]
+#no-auto-rehash # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+# The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
diff --git a/compass/deploy/ansible/roles/database/templates/wsrep.cnf b/compass/deploy/ansible/roles/database/templates/wsrep.cnf
new file mode 100644
index 000000000..b9e942451
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/wsrep.cnf
@@ -0,0 +1,126 @@
+# This file contains wsrep-related mysqld options. It should be included
+# in the main MySQL configuration file.
+#
+# Options that need to be customized:
+# - wsrep_provider
+# - wsrep_cluster_address
+# - wsrep_sst_auth
+# The rest of defaults should work out of the box.
+
+##
+## mysqld options _MANDATORY_ for correct operation of the cluster
+##
+[mysqld]
+
+# (This must be substituted by wsrep_format)
+binlog_format=ROW
+
+# Currently only InnoDB storage engine is supported
+default-storage-engine=innodb
+
+# to avoid issues with 'bulk mode inserts' using autoinc
+innodb_autoinc_lock_mode=2
+
+# This is a must for parallel applying
+innodb_locks_unsafe_for_binlog=1
+
+# Query Cache is not supported with wsrep
+query_cache_size=0
+query_cache_type=0
+
+# Override bind-address
+# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
+# it will have (most likely) disastrous consequences on donor node
+#bind-address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address={{ HA_VIP }}
+
+##
+## WSREP options
+##
+
+# Full path to wsrep provider library or 'none'
+wsrep_provider=/usr/lib/galera/libgalera_smm.so
+
+# Provider specific configuration options
+#wsrep_provider_options=
+
+# Logical cluster name. Should be the same for all nodes.
+wsrep_cluster_name="my_wsrep_cluster"
+
+# Group communication system handle
+wsrep_cluster_address=gcomm://{{ HA_CLUSTER[inventory_hostname] }}
+
+# Human-readable node name (non-unique). Hostname by default.
+#wsrep_node_name=
+
+# Base replication <address|hostname>[:port] of the node.
+# The values supplied will be used as defaults for state transfer receiving,
+# listening ports and so on. Default: address of the first network interface.
+wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+
+# Address for incoming client connections. Autodetect by default.
+#wsrep_node_incoming_address=
+
+# How many threads will process writesets from other nodes
+wsrep_slave_threads=1
+
+# DBUG options for wsrep provider
+#wsrep_dbug_option
+
+# Generate fake primary keys for non-PK tables (required for multi-master
+# and parallel applying operation)
+wsrep_certify_nonPK=1
+
+# Maximum number of rows in write set
+wsrep_max_ws_rows=131072
+
+# Maximum size of write set
+wsrep_max_ws_size=1073741824
+
+# to enable debug level logging, set this to 1
+wsrep_debug=1
+
+# convert locking sessions into transactions
+wsrep_convert_LOCK_to_trx=0
+
+# how many times to retry deadlocked autocommits
+wsrep_retry_autocommit=1
+
+# change auto_increment_increment and auto_increment_offset automatically
+wsrep_auto_increment_control=1
+
+# retry autoinc insert, which failed for duplicate key error
+wsrep_drupal_282555_workaround=0
+
+# enable "strictly synchronous" semantics for read operations
+wsrep_causal_reads=0
+
+# Command to call when node status or cluster membership changes.
+# Will be passed all or some of the following options:
+# --status - new status of this node
+# --uuid - UUID of the cluster
+# --primary - whether the component is primary or not ("yes"/"no")
+# --members - comma-separated list of members
+# --index - index of this node in the list
+wsrep_notify_cmd=
+
+##
+## WSREP State Transfer options
+##
+
+# State Snapshot Transfer method
+wsrep_sst_method=rsync
+
+# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!!
+# (SST method dependent. Defaults to the first IP of the first interface)
+#wsrep_sst_receive_address=
+
+# SST authentication string. This will be used to send SST to joining nodes.
+# Depends on SST method. For mysqldump method it is root:<root password>
+wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }}
+
+# Desired SST donor name.
+#wsrep_sst_donor=
+
+# Protocol version to use
+# wsrep_protocol_version=
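wsrep_notify_cmd is left empty above; when set, Galera invokes the script with the documented flags on every node-status or membership change. A minimal sketch of such a hook (path and log file are illustrative, not part of this role):

    #!/bin/sh
    # Galera passes flags such as:
    #   --status Synced --uuid <uuid> --primary yes --members <list> --index 0
    echo "$(date -u +%FT%TZ) wsrep notify: $*" >> /var/log/mysql/wsrep_notify.log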
diff --git a/compass/deploy/ansible/roles/glance/handlers/main.yml b/compass/deploy/ansible/roles/glance/handlers/main.yml
new file mode 100644
index 000000000..d8eaa44a2
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: restart glance-api
+ service: name=glance-api state=restarted enabled=yes
+
+- name: restart glance-registry
+ service: name=glance-registry state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
new file mode 100644
index 000000000..28392a3f4
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
@@ -0,0 +1,29 @@
+---
+- name: init glance db version
+ shell: glance-manage db_version_control 0
+
+- name: sync glance db
+ shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart glance-registry
+ - restart glance-api
+
+- meta: flush_handlers
+
+- name: place image upload script
+ template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
+
+- name: download cirros image file
+ get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }}
+
+- name: wait for 9292 port to become available
+ wait_for: host={{ image_host }} port=9292 delay=5
+
+- name: run image upload
+ shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? != 0 ] ;then sleep 5; else break;fi;done
+ args:
+ creates: image_upload_completed
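The one-line retry loop in the last task expands to the sketch below; the creates: guard means Ansible skips the task entirely once image_upload_completed exists:

    for i in 0 1 2 3 4 5; do
        if /opt/image_upload.sh; then
            touch image_upload_completed    # marker consulted by 'creates:'
            break
        fi
        sleep 5                             # back off before the next attempt
    done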
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
new file mode 100644
index 000000000..505b3b089
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
@@ -0,0 +1,26 @@
+---
+- name: install glance packages
+ apt: name={{ item }} state=latest force=yes
+ with_items:
+ - glance
+ - python-glanceclient
+
+- name: generate glance service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - glance-registry
+ - glance-api
+
+- name: update glance conf
+ template: src={{ item }} dest=/etc/glance/{{ item }}
+ backup=yes
+ with_items:
+ - glance-api.conf
+ - glance-registry.conf
+ notify:
+ - restart glance-registry
+ - restart glance-api
+
+- name: remove default sqlite db
+ shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed
+
diff --git a/compass/deploy/ansible/roles/glance/tasks/main.yml b/compass/deploy/ansible/roles/glance/tasks/main.yml
new file mode 100644
index 000000000..296f0dcac
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- include: glance_install.yml
+ tags:
+ - install
+ - glance_install
+ - glance
+
+- include: nfs.yml
+ tags:
+ - nfs
+
+- include: glance_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - glance_config
+ - glance
+
diff --git a/compass/deploy/ansible/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/roles/glance/tasks/nfs.yml
new file mode 100644
index 000000000..c03ab4d6e
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/nfs.yml
@@ -0,0 +1,41 @@
+---
+- name: get nfs server
+ local_action: shell /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:"
+ register: ip_info
+ run_once: True
+
+- name: install nfs
+ local_action: yum name=nfs-utils state=present
+ run_once: True
+
+- name: create image directory
+ local_action: file path=/opt/images state=directory mode=0777
+ run_once: True
+
+- name: update nfs config
+ local_action: lineinfile dest=/etc/exports state=present
+ regexp="/opt/images *(rw,insecure,sync,all_squash)"
+ line="/opt/images *(rw,insecure,sync,all_squash)"
+ run_once: True
+
+- name: restart nfs service
+ local_action: service name=nfs state=restarted enabled=yes
+ run_once: True
+
+- name: install nfs comm
+ apt: name=nfs-common state=present
+
+- name: get mount info
+ command: mount
+ register: mount_info
+
+- name: mount image directory
+ shell: |
+ mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+ echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ when: mount_info.stdout.find('images') == -1
+ with_items:
+ ip_info.stdout_lines
+ retries: 5
+ delay: 3
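After the export and mount tasks have run, the wiring can be spot-checked from a node. A hedged sketch, with 10.1.0.12 standing in for the server address detected by the first task:

    showmount -e 10.1.0.12                 # should list /opt/images *
    mount | grep /var/lib/glance/images    # live NFS mount
    grep images /etc/fstab                 # persisted nfsvers=3 entry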
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/roles/glance/templates/glance-api.conf
new file mode 100644
index 000000000..763539ef2
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/glance-api.conf
@@ -0,0 +1,677 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if one is not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+# "default_store" option has been moved to [glance_store] section in
+# Juno release
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.gridfs.Store,
+# glance.store.vmware_datastore.Store,
+#known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = {{ image_host }}
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system. For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = {{ internal_ip }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# What protocol to use when connecting to the registry server?
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are four methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
+# message queue), or noop (no notifications sent, the default)
+# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver`
+# notifier_strategy = default
+
+# Driver or drivers to handle sending notifications
+# notification_driver = noop
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+# this option has been moved to [glance_store] for Juno release
+# filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If same store is specified multiple times then BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
+
+# A path to a JSON file that contains metadata describing the storage
+# system. When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that the account should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# What size, in MB, should Glance start chunking image files
+# and do a large object manifest in Swift? By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, what size, in MB, should
+# Glance write chunks to Swift? This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file, and the default is 200MB
+swift_store_large_object_chunk_size = 200
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Container within the account that the account should use
+# for storing images in S3. Note that S3 has a flat namespace,
+# so you need a unique bucket name for your glance images. An
+# easy way to do this is append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow to perform insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
+# should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow to perform insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user. This value is the total number
+# of bytes that a user can use across all storage systems. A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Manager Options =================================
+
+# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
+# Whether or not to enforce that all DB tables have charset utf8.
+# If your database tables do not have charset utf8 you will
+# need to convert before this option is removed. This option is
+# only relevant if your database engine is MySQL.
+#db_enforce_mysql_charset = True
+
+# =============== Glance Store ====================================
+[glance_store]
+# Moved from [DEFAULT], for Juno release
+default_store = file
+filesystem_store_datadir = /var/lib/glance/images/
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor= keystone
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option will be applied when you using 'store_type' option as image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
new file mode 100644
index 000000000..8d731a24f
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
@@ -0,0 +1,190 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = {{ internal_ip }}
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `param_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor= keystone
diff --git a/compass/deploy/ansible/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/roles/glance/templates/image_upload.sh
new file mode 100644
index 000000000..9dd1fa8d3
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/image_upload.sh
@@ -0,0 +1,2 @@
+sleep 10
+glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
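A hedged follow-up check, reusing the same credentials as the script, confirms the image registered and went active:

    glance --os-username=admin --os-password={{ ADMIN_PASS }} \
        --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 \
        image-list | grep cirros           # expect status 'active'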
diff --git a/compass/deploy/ansible/roles/ha/files/galera_chk b/compass/deploy/ansible/roles/ha/files/galera_chk
new file mode 100644
index 000000000..9fd165c00
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/galera_chk
@@ -0,0 +1,10 @@
+#! /bin/sh
+
+code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'`
+
+if [ "$code"=="1" ]
+then
+ echo "HTTP/1.1 200 OK\r\n"
+else
+ echo "HTTP/1.1 503 Service Unavailable\r\n"
+fi
diff --git a/compass/deploy/ansible/roles/ha/files/mysqlchk b/compass/deploy/ansible/roles/ha/files/mysqlchk
new file mode 100644
index 000000000..2c03f19f4
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/mysqlchk
@@ -0,0 +1,15 @@
+# default: off
+# description: An xinetd service that reports Galera/MySQL health to TCP
+# clients on port 9200 by running /usr/local/bin/galera_chk for
+# each connection.
+service mysqlchk
+{
+ disable = no
+ flags = REUSE
+ socket_type = stream
+ protocol = tcp
+ user = root
+ wait = no
+ server = /usr/local/bin/galera_chk
+ port = 9200
+}
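+# xinetd expects listed services to have an /etc/services entry; the ha
+# role adds a matching "mysqlchk 9200/tcp" line there (see tasks/main.yml)
+# before restarting xinetd.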
diff --git a/compass/deploy/ansible/roles/ha/files/notify.sh b/compass/deploy/ansible/roles/ha/files/notify.sh
new file mode 100644
index 000000000..5edffe84b
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/notify.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+python /usr/local/bin/failover.py $1
+mysql -uroot -e"flush hosts"
+service mysql restart
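+
+# keepalived calls this hook with the new VRRP state as $1 ("master" or
+# "backup" -- see notify_master/notify_backup in keepalived.conf), so
+# failover.py above can rewrite wsrep_cluster_address before the restart.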
diff --git a/compass/deploy/ansible/roles/ha/handlers/main.yml b/compass/deploy/ansible/roles/ha/handlers/main.yml
new file mode 100644
index 000000000..a02c686bf
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/handlers/main.yml
@@ -0,0 +1,9 @@
+---
+- name: restart haproxy
+ service: name=haproxy state=restarted enabled=yes
+
+- name: restart xinetd
+ service: name=xinetd state=restarted enabled=yes
+
+- name: restart keepalived
+ service: name=keepalived state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/ha/tasks/main.yml b/compass/deploy/ansible/roles/ha/tasks/main.yml
new file mode 100644
index 000000000..a00c21ac4
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/tasks/main.yml
@@ -0,0 +1,94 @@
+---
+- name: install keepalived xinetd haproxy
+ apt: name={{ item }} state=present
+ with_items:
+ - keepalived
+ - xinetd
+ - haproxy
+
+- name: generate ha service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - keepalived
+ - xinetd
+ - haproxy
+
+- name: install pexpect
+ pip: name=pexpect state=present
+
+- name: activate ip_nonlocal_bind
+ sysctl: name=net.ipv4.ip_nonlocal_bind value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_intvl
+ sysctl: name=net.ipv4.tcp_keepalive_intvl value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_probes
+ sysctl: name=net.ipv4.tcp_keepalive_probes value=5
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_time
+ sysctl: name=net.ipv4.tcp_keepalive_time value=5
+ state=present reload=yes
+
+- name: update haproxy cfg
+ template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
+ notify: restart haproxy
+
+- name: set haproxy enable flag
+ lineinfile: dest=/etc/default/haproxy state=present
+              regexp="^ENABLED="
+ line="ENABLED=1"
+ notify: restart haproxy
+
+- name: set haproxy log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local0.* /var/log/haproxy.log"
+ line="local0.* /var/log/haproxy.log"
+
+- name: set rsyslog udp module
+ lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="^#\$ModLoad imudp"
+ line="$ModLoad imudp"
+
+- name: set rsyslog udp port
+ lineinfile: dest=/etc/rsyslog.conf state=present
+              regexp="^#\$UDPServerRun 514"
+ line="$UDPServerRun 514"
+
+- name: copy galera_chk file
+ copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
+
+- name: copy notify file
+ copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
+
+- name: copy notify template file
+ template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
+
+- name: add network service
+ lineinfile: dest=/etc/services state=present
+ line="mysqlchk 9200/tcp"
+ insertafter="Local services"
+ notify: restart xinetd
+
+- name: copy mysqlchk file
+ copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777
+ notify: restart xinetd
+
+- name: set keepalived start param
+ lineinfile: dest=/etc/default/keepalived state=present
+              regexp="^DAEMON_ARGS="
+ line="DAEMON_ARGS=\"-D -d -S 1\""
+
+- name: set keepalived log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local1.* /var/log/keepalived.log"
+ line="local1.* /var/log/keepalived.log"
+
+- name: update keepalived info
+ template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+ notify: restart keepalived
+
+- name: restart rsyslog
+ shell: service rsyslog restart
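+
+# The sysctl changes above can be spot-checked on a node with, e.g.:
+#   sysctl net.ipv4.ip_nonlocal_bind net.ipv4.tcp_keepalive_time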
diff --git a/compass/deploy/ansible/roles/ha/templates/failover.j2 b/compass/deploy/ansible/roles/ha/templates/failover.j2
new file mode 100644
index 000000000..b03c7375d
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/failover.j2
@@ -0,0 +1,65 @@
+import ConfigParser, os, socket
+import logging as LOG
+import pxssh
+import sys
+import re
+
+LOG_FILE="/var/log/mysql_failover"
+try:
+ os.remove(LOG_FILE)
+except OSError:
+ pass
+
+LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
+ha_vip = "{{ HA_VIP }}"
+LOG.info("ha_vip: %s" % ha_vip)
+
+#ha_vip = "10.1.0.50"
+galera_path = '/etc/mysql/conf.d/wsrep.cnf'
+pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
+
+def ssh_get_hostname(ip):
+ try:
+ s = pxssh.pxssh()
+ s.login("%s" % ip, "root", "root")
+ s.sendline('hostname') # run a command
+ s.prompt() # match the prompt
+ result = s.before.strip() # print everything before the prompt.
+ return result.split(os.linesep)[1]
+ except pxssh.ExceptionPxssh as e:
+ LOG.error("pxssh failed on login.")
+ raise
+
+def failover(mode):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ config.readfp(open(galera_path))
+ wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
+ wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
+
+ LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
+
+ if mode == "master":
+ # refresh wsrep_cluster_address to null
+ LOG.info("I'm being master, set wsrep_cluster_address to null")
+ wsrep_cluster_address = ""
+
+ elif mode == "backup":
+ # refresh wsrep_cluster_address to master int ip
+ hostname = ssh_get_hostname(ha_vip)
+ wsrep_cluster_address = socket.gethostbyname(hostname)
+ LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
+
+ LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
+ wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
+ config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
+ with open(galera_path, 'wb') as fp:
+ #config.write(sys.stdout)
+ config.write(fp)
+
+ os.system("service mysql restart")
+ LOG.info("failover success!!!")
+
+if __name__ == "__main__":
+ LOG.debug("call me: %s" % sys.argv)
+ failover(sys.argv[1])
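+
+# Illustration of the rewrite performed above, assuming a node demoted to
+# backup whose current master resolves to 10.1.0.51:
+#   before: wsrep_cluster_address = gcomm://
+#   after:  wsrep_cluster_address = gcomm://10.1.0.51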
diff --git a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
new file mode 100644
index 000000000..4ed528ad6
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
@@ -0,0 +1,133 @@
+
+global
+ #chroot /var/run/haproxy
+ daemon
+ user haproxy
+ group haproxy
+ maxconn 4000
+ pidfile /var/run/haproxy/haproxy.pid
+ #log 127.0.0.1 local0
+ tune.bufsize 1000000
+ stats socket /var/run/haproxy.sock
+ stats timeout 2m
+
+defaults
+ log global
+ maxconn 8000
+ option redispatch
+ option dontlognull
+ option splice-auto
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 6m
+ timeout server 6m
+ timeout check 10s
+ retries 5
+
+listen proxy-glance_registry_cluster
+ bind {{ HA_VIP }}:9191
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-glance_api_cluster
+ bind {{ HA_VIP }}:9292
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova-novncproxy
+ bind {{ HA_VIP }}:6080
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-network
+ bind {{ HA_VIP }}:9696
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_admin_cluster
+ bind {{ HA_VIP }}:35357
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_public_internal_cluster
+ bind {{ HA_VIP }}:5000
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_compute_api_cluster
+ bind {{ HA_VIP }}:8774
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_metadata_api_cluster
+ bind {{ HA_VIP }}:8775
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-cinder_api_cluster
+ bind {{ HA_VIP }}:8776
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+ mode http
+ bind 0.0.0.0:8888
+ stats enable
+ stats refresh 30s
+ stats uri /
+ stats realm Global\ statistics
+ stats auth admin:admin
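+
+# Once haproxy is up, the stats page above can be spot-checked with, e.g.:
+#   curl -u admin:admin http://<controller-ip>:8888/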
+
+
diff --git a/compass/deploy/ansible/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/roles/ha/templates/keepalived.conf
new file mode 100644
index 000000000..0b491376e
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/keepalived.conf
@@ -0,0 +1,42 @@
+global_defs {
+
+    notification_email {
+ root@huawei.com
+ }
+
+ notification_email_from keepalived@huawei.com
+
+ smtp_server localhost
+
+ smtp_connect_timeout 30
+
+ router_id NodeA
+
+}
+
+vrrp_instance VI_1 {
+
+ interface {{ INTERNAL_INTERFACE }}
+ virtual_router_id 51
+ state BACKUP
+ nopreempt
+ advert_int 1
+{% for host in groups['controller'] %}
+{% if host == inventory_hostname %}
+ priority {{ 100 - loop.index0 * 5 }}
+{% endif %}
+{% endfor %}
+
+ authentication {
+ auth_type PASS
+ auth_pass 1111
+ }
+
+ virtual_ipaddress {
+ {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }}
+ }
+
+ notify_master "/usr/local/bin/notify.sh master"
+ notify_backup "/usr/local/bin/notify.sh backup"
+}
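+
+# The loop above gives the first controller in the inventory priority 100,
+# the next 95, and so on, so elections follow inventory order; "nopreempt"
+# keeps a recovered node from taking the VIP back on its own.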
+
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
new file mode 100644
index 000000000..3203b2631
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
@@ -0,0 +1,16 @@
+---
+- name: keystone-manage db-sync
+  shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+
+- name: place keystone init script under /opt/
+ template: src=keystone_init dest=/opt/keystone_init mode=0744
+
+- name: run keystone_init
+  shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed
+ args:
+ creates: keystone_init_complete
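+# The "creates" guard keeps this task idempotent: once the
+# keystone_init_complete file exists, re-runs skip the init script.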
+
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
new file mode 100644
index 000000000..e69c06954
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
@@ -0,0 +1,29 @@
+---
+- name: install keystone packages
+ apt: name=keystone state=present force=yes
+
+- name: generate keystone service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - keystone
+
+- name: update keystone conf
+ template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+
+- name: delete sqlite database
+  shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
+
+- name: cron job to purge expired tokens hourly
+ shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
+
+- name: modify keystone cron rights
+ file: path=/var/spool/cron/crontabs/keystone mode=0600
+
+- name: keystone source files
+ template: src={{ item }} dest=/opt/{{ item }}
+ with_items:
+ - admin-openrc.sh
+ - demo-openrc.sh
+
+- name: manually start keystone
+ service: name=keystone state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/keystone/tasks/main.yml b/compass/deploy/ansible/roles/keystone/tasks/main.yml
new file mode 100644
index 000000000..2f36e9119
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: keystone_install.yml
+ tags:
+ - install
+ - keystone_install
+ - keystone
+
+- include: keystone_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - keystone_config
+ - keystone
diff --git a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
new file mode 100644
index 000000000..f2e0d615b
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
@@ -0,0 +1,6 @@
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+export OS_USERNAME=admin
+
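+# Typical use on a controller once keystone is up:
+#   source /opt/admin-openrc.sh && keystone user-list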
diff --git a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
new file mode 100644
index 000000000..8bdc51ba7
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
@@ -0,0 +1,5 @@
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
new file mode 100644
index 000000000..fc8bf1f1c
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
@@ -0,0 +1,1317 @@
+[DEFAULT]
+
+admin_token={{ ADMIN_TOKEN }}
+
+public_bind_host = {{ identity_host }}
+
+admin_bind_host = {{ identity_host }}
+
+#compute_port=8774
+
+#admin_port=35357
+
+#public_port=5000
+
+# The base public endpoint URL for keystone that is
+# advertised to clients (NOTE: this does NOT affect how
+# keystone listens for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:5000/v2.0/users will
+# default to http://server:5000. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#public_endpoint=http://localhost:%(public_port)s/
+
+# The base admin endpoint URL for keystone that is advertised
+# to clients (NOTE: this does NOT affect how keystone listens
+# for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:35357/v2.0/users will
+# default to http://server:35357. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#admin_endpoint=http://localhost:%(admin_port)s/
+
+# onready allows you to send a notification when the process
+# is ready to serve. For example, to have it notify using
+# systemd, one could set shell command: "onready = systemd-
+# notify --ready" or a module with notify() method: "onready =
+# keystone.common.systemd". (string value)
+#onready=<None>
+
+# enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# limit the sizes of user & tenant ID/names. (integer value)
+#max_param_size=64
+
+# similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, the member_role_id will be used in the API
+# add_user_to_project. (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, member_role_name will be ignored. (string
+# value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib encrypt
+# method. (integer value)
+#crypt_strength=40000
+
+# Set this to True if you want to enable TCP_KEEPALIVE on
+# server sockets i.e. sockets used by the keystone wsgi server
+# for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is True. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection can be set with list_limit, with no limit set by
+# default. This global limit may be then overridden for a
+# specific driver, by specifying a list_limit in the
+# appropriate section (e.g. [assignment]). (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBIT_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBIT_PASS }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Required if EC2 server requires client certificate. (string
+# value)
+#keystone_ec2_keyfile=<None>
+
+# Client certificate key filename. Required if EC2 server
+# requires client certificate. (string value)
+#keystone_ec2_certfile=<None>
+
+# A PEM encoded certificate authority to use when verifying
+# HTTPS connections. Defaults to the system CAs. (string
+# value)
+#keystone_ec2_cafile=<None>
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+debug={{ DEBUG }}
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+verbose={{ VERBOSE }}
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+log_dir = /var/log/keystone
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+#use_syslog=false
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcache) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes, it
+# is highly recommended to always leave this set to True.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
+
+# Proxy Classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. Comma delimited
+# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2.
+# (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls) This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# False. (boolean value)
+#debug_cache_backend=false
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Keystone catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in keystone.openstack.common.db.options
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=keystone.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect=false
+
+# seconds between db connection retries (integer value)
+#db_retry_interval=1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval=true
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval=10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# Keystone EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking
+# the v2 API. There is nothing special about this domain,
+# other than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to True to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain-specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Keystone Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes, it is highly recommended to always leave this set
+# to True. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# allow deleting subtrees. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries, this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectClass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=email
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True"the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# List of additional LDAP attributes used for mapping
+# Additional attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
+
+# Search base for projects (string value)
+#tenant_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+#tenant_filter=<None>
+
+# LDAP objectClass for projects. (string value)
+#tenant_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+#tenant_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+#tenant_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+#tenant_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+#tenant_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+#tenant_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+#tenant_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+#tenant_attribute_ignore=
+
+# Allow tenant creation in LDAP backend. (boolean value)
+#tenant_allow_create=true
+
+# Allow tenant update in LDAP backend. (boolean value)
+#tenant_allow_update=true
+
+# Allow tenant deletion in LDAP backend. (boolean value)
+#tenant_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if they are a member
+# of the "tenant_enabled_emulation_dn" group. (boolean value)
+#tenant_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+#tenant_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#tenant_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectClass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectClass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# valid options for tls_req_cert are demand, never, and allow.
+# (string value)
+#tls_req_cert=demand
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port" (list value)
+#servers=localhost:11211
+
+# Number of compare-and-set attempts to make when using
+# compare-and-set in the token memcache back end. (integer
+# value)
+#max_compare_and_set_retry=16
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# role-assignment inheritance to projects from owning domain
+# can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA Key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key Size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate Subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL Key Length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL Certificate Subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Keystone stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token e.g. kerberos, x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode e.g. kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pki|uuid].Provider". (string
+# value)
+provider=keystone.token.providers.uuid.Provider
+
+# Keystone Token persistence backend driver. (string value)
+driver=keystone.token.persistence.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# True enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if
+# you are switching to using the Revoke extension with a
+# backend other than KVS, which stores events in memory.
+# (boolean value)
+#revoke_by_id=true
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Keystone Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
+[extra_headers]
+Distribution = Ubuntu
+
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone_init b/compass/deploy/ansible/roles/keystone/templates/keystone_init
new file mode 100644
index 000000000..729669bfb
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/keystone_init
@@ -0,0 +1,43 @@
+# create an administrative user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
+
+# create a normal user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=demo@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
+
+# create a service tenant
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
+
+# register the keystone identity service and endpoint
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0
+
+# Create a glance user that the Image Service can use to authenticate with the Identity service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
+
+#Register the Image Service with the Identity service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292
+
+#Create a nova user that Compute uses to authenticate with the Identity Service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
+
+# register Compute with the Identity Service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s
+
+# register neutron user, role and service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696
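A quick way to confirm the registrations above is to list what the script created, using the same v2 keystone CLI. A minimal smoke-test sketch, assuming the template has been rendered so that ADMIN_TOKEN and HA_VIP carry real values (the angle-bracket placeholders are illustrative):

    # Env-var equivalents of --os-token/--os-endpoint in the v2 keystone CLI.
    export OS_SERVICE_TOKEN=<rendered ADMIN_TOKEN>
    export OS_SERVICE_ENDPOINT=http://<rendered HA_VIP>:35357/v2.0
    keystone service-list    # expect identity, image, compute and network rows
    keystone endpoint-list   # expect one endpoint per service above
    keystone user-list       # expect admin, demo, glance, nova and neutron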
diff --git a/compass/deploy/ansible/roles/monitor/files/check_service.sh b/compass/deploy/ansible/roles/monitor/files/check_service.sh
new file mode 100644
index 000000000..d30967343
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/files/check_service.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Restart any monitored Upstart job that has fallen back to "stop/waiting".
+services=$(uniq /opt/service)
+for service in $services; do
+    if /sbin/initctl list | awk '/stop\/waiting/{print $1}' | uniq | grep -q "$service"; then
+        /sbin/start "$service"
+    fi
+done
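The script only touches Upstart jobs whose names the other roles append to /opt/service. A sketch of exercising it by hand, assuming an Upstart-based host and a job that is deliberately stopped first:

    # Hypothetical manual test of the checker.
    echo rabbitmq-server >> /opt/service
    /sbin/stop rabbitmq-server
    /usr/local/bin/check_service.sh
    /sbin/initctl status rabbitmq-server   # expect "start/running" again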
diff --git a/compass/deploy/ansible/roles/monitor/files/root b/compass/deploy/ansible/roles/monitor/files/root
new file mode 100644
index 000000000..9c55c4f16
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/files/root
@@ -0,0 +1 @@
+* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1
diff --git a/compass/deploy/ansible/roles/monitor/tasks/main.yml b/compass/deploy/ansible/roles/monitor/tasks/main.yml
new file mode 100644
index 000000000..e5b93f396
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: copy service check file
+ copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777
+
+- name: copy cron file
+ copy: src=root dest=/var/spool/cron/crontabs/root mode=0600
+
+- name: restart cron
+ service: name=cron state=restarted
+
+
diff --git a/compass/deploy/ansible/roles/mq/tasks/main.yml b/compass/deploy/ansible/roles/mq/tasks/main.yml
new file mode 100644
index 000000000..4ae406533
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- include: rabbitmq.yml
+
+#- include: rabbitmq_cluster.yml
+# when: HA_CLUSTER is defined
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
new file mode 100644
index 000000000..571440662
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
@@ -0,0 +1,45 @@
+---
+- name: create rabbitmq directory
+ file: path=/etc/rabbitmq state=directory mode=0755
+
+- name: copy rabbitmq config file
+ template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755
+
+- name: install rabbitmq-server
+ apt: name=rabbitmq-server state=present
+
+- name: stop rabbitmq-server
+ service: name=rabbitmq-server
+ state=stopped
+
+- name: update .erlang.cookie
+ template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie
+ group=rabbitmq
+ owner=rabbitmq
+ mode=0400
+ when: ERLANG_TOKEN is defined
+
+- name: start and enable rabbitmq-server
+ service: name=rabbitmq-server
+ state=started
+ enabled=yes
+
+- name: generate mq service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - rabbitmq-server
+
+- name: modify rabbitmq password
+ command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
+ when: "RABBIT_USER is defined and RABBIT_USER == 'guest'"
+ ignore_errors: True
+
+- name: add rabbitmq user
+ command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }}
+ when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+ ignore_errors: True
+
+- name: set rabbitmq user permission
+ command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*"
+ when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+
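For reference, the user-management tasks above reduce to the following rabbitmqctl sequence. A sketch with placeholder credentials standing in for RABBIT_USER and RABBIT_PASS:

    # Equivalent manual commands for a non-guest account.
    rabbitmqctl add_user openstack s3cr3t
    rabbitmqctl set_permissions -p / openstack ".*" ".*" ".*"
    rabbitmqctl list_users    # verify the account was created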
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
new file mode 100644
index 000000000..afd4c779f
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
@@ -0,0 +1,27 @@
+---
+- name: stop rabbitmq app
+ command: rabbitmqctl stop_app
+ when: HA_CLUSTER[inventory_hostname] != ''
+
+- name: rabbitmqctl reset
+ command: rabbitmqctl reset
+ when: HA_CLUSTER[inventory_hostname] != ''
+
+- name: stop rabbitmq
+ shell: rabbitmqctl stop
+
+- name: set detach
+ shell: rabbitmq-server -detached
+
+- name: join cluster
+ command: rabbitmqctl join_cluster rabbit@{{ item }}
+ when: item != inventory_hostname and HA_CLUSTER[item] == ''
+ with_items:
+ groups['controller']
+
+- name: start rabbitmq app
+ command: rabbitmqctl start_app
+
+- name: set the HA policy
+ rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all"
+
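On a joining node, the play corresponds to this manual sequence; a sketch assuming the seed node is reachable as rabbit@controller1 (the hostname is illustrative):

    # Join an existing cluster and mirror all non-amq.* queues.
    rabbitmqctl stop_app
    rabbitmqctl reset
    rabbitmqctl join_cluster rabbit@controller1
    rabbitmqctl start_app
    rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode":"all"}'
    rabbitmqctl cluster_status    # verify membership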
diff --git a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
new file mode 100644
index 000000000..6dd7349c7
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
@@ -0,0 +1 @@
+RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }}
diff --git a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
new file mode 100644
index 000000000..36d779dd5
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
new file mode 100644
index 000000000..825178b2a
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+neutron_ovs_bridge_mappings: ""
diff --git a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
new file mode 100644
index 000000000..36d779dd5
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
new file mode 100644
index 000000000..93ee46fc2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: install compute-related neutron packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-common
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+
+- name: generate neutron compute service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - neutron-plugin-openvswitch-agent
+
+- name: install neutron openvswitch agent
+ apt: name=neutron-plugin-openvswitch-agent
+ state=present force=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+ template: src=neutron-network.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- name: config ml2 plugin
+ template: src=ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- name: add br-int
+ openvswitch_bridge: bridge=br-int state=present
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+ - restart nova-compute
+
+- include: ../../neutron-network/tasks/odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- meta: flush_handlers
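Once the play has run, the kernel and Open vSwitch state it establishes can be spot-checked directly on the compute node; a minimal sketch:

    # Expected state after this role completes.
    sysctl net.ipv4.ip_forward              # expect = 1
    sysctl net.ipv4.conf.all.rp_filter      # expect = 0
    sysctl net.ipv4.conf.default.rp_filter  # expect = 0
    ovs-vsctl br-exists br-int && echo 'br-int present'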
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
new file mode 100644
index 000000000..19eb62ec4
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
new file mode 100644
index 000000000..7bcbd9df2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
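dhcp-option-force=26,1454 pushes DHCP option 26 (interface MTU) to guests, so instance traffic fits inside GRE/VXLAN encapsulation on a 1500-byte underlay (1500 - 1454 leaves 46 bytes of tunnel-header headroom). A sketch of checking the applied MTU from inside a guest, assuming its NIC is eth0:

    # Hypothetical in-guest check that the forced MTU arrived via DHCP.
    ip link show eth0 | grep -o 'mtu [0-9]*'    # expect "mtu 1454"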
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
new file mode 100644
index 000000000..32caf96dd
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
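The XORP configuration enables IGMPv3 on the VXLAN-facing interface so the host can join the multicast group that carries broadcast traffic between tunnel endpoints (vxlan_group = 239.1.1.1 in ml2_conf.ini). A sketch of verifying membership on the host, assuming the rendered device name is eth1:

    # Hypothetical check of multicast group membership.
    ip maddr show dev eth1 | grep 239.1.1.1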
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
new file mode 100644
index 000000000..b394c0082
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set as False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
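With use_namespaces = True each router lives in its own qrouter-<UUID> network namespace, and its gateway port is plugged into external_network_bridge (br-ex). A sketch of inspecting one on the network node; the UUID is illustrative:

    # Hypothetical inspection of a namespaced router.
    ip netns list | grep qrouter
    ip netns exec qrouter-<router-uuid> ip addr show    # the qg-* port carries the external subnet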
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
new file mode 100644
index 000000000..6badf2877
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
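The proxy signs each request's Instance-ID header with an HMAC-SHA256 of metadata_proxy_shared_secret; nova recomputes the digest with its matching neutron_metadata_proxy_shared_secret and rejects mismatches. A sketch of reproducing the signature by hand, with illustrative values:

    # Recompute the X-Instance-ID-Signature header value.
    SECRET='<rendered METADATA_SECRET>'
    INSTANCE_ID='0d9bbd85-0000-0000-0000-000000000000'   # illustrative UUID
    echo -n "$INSTANCE_ID" | openssl dgst -sha256 -hmac "$SECRET"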
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
new file mode 100644
index 000000000..a7900693e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
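When the openvswitch mechanism driver is active, the rendered [ovs] and [agent] sections translate into concrete bridges and tunnel ports on every node. A post-deploy verification sketch, assuming vxlan is among NEUTRON_TUNNEL_TYPES:

    # Hypothetical check of the ML2/OVS wiring.
    ovs-vsctl show | grep -E 'Bridge "?(br-int|br-tun|br-ex)"?'
    ovs-vsctl list-ports br-tun | grep '^vxlan'    # one vxlan-* port per peer endpoint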
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
new file mode 100644
index 000000000..93be9cbc0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# 'infinite' or a negative value means no limit; otherwise the value
+# must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will return at most
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back-end a health monitor is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
new file mode 100644
index 000000000..15753675e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# 'infinite' or a negative value means no limit; otherwise the value
+# must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will return at most
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
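The service_provider entries above pack a service type, a provider name, a dotted driver path, and an optional default flag into one colon-separated string. As a rough illustration (the parse_provider helper below is hypothetical, not part of Neutron), such an entry decomposes like this in Python:

    # Hypothetical sketch: decompose a service_provider entry of the form
    # <service_type>:<name>:<driver>[:default]
    def parse_provider(entry):
        parts = entry.split(":")
        is_default = parts[-1] == "default"
        # The driver is a dotted Python path; rejoin anything between the
        # provider name and an optional trailing "default" flag.
        driver = ":".join(parts[2:-1] if is_default else parts[2:])
        return {"service_type": parts[0], "name": parts[1],
                "driver": driver, "default": is_default}

    print(parse_provider(
        "LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers."
        "haproxy.plugin_driver.HaproxyOnHostPluginDriver:default"))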
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
new file mode 100644
index 000000000..b92e202f2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
new file mode 100644
index 000000000..4988cb036
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
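Note that this nova.conf sets notification_driver twice. Under oslo.config that option is multi-valued, so both the RPC notifier and the ceilometer notifier are loaded; a plain INI parser would keep only one of the two. A small sketch of that caveat with Python's standard-library parser (the sample text is inlined so the snippet is self-contained):

    import configparser
    import textwrap

    SAMPLE = textwrap.dedent("""\
        [DEFAULT]
        notification_driver = nova.openstack.common.notifier.rpc_notifier
        notification_driver = ceilometer.compute.nova_notifier
        """)

    # strict=True (the default) rejects duplicate keys outright;
    # strict=False silently keeps only the last value.
    parser = configparser.ConfigParser(strict=False)
    parser.read_string(SAMPLE)
    print(parser["DEFAULT"]["notification_driver"])
    # -> ceilometer.compute.nova_notifier (the rpc_notifier entry is lost)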
diff --git a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
new file mode 100644
index 000000000..b4c1585d3
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+ service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+ service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+ service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+ service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+ service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+ service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+ service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
new file mode 100644
index 000000000..9c04d74e0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: neutron_install.yml
+ tags:
+ - install
+ - neutron_install
+ - neutron
+
+- include: neutron_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - neutron_config
+ - neutron
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
new file mode 100644
index 000000000..77cc29ae4
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
@@ -0,0 +1,10 @@
+---
+- name: neutron-db-manage upgrade to Juno
+ shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart neutron-server
+
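The until/retries/delay keywords above re-run the schema migration until it exits 0, giving up after five attempts spaced three seconds apart. The control flow is roughly equivalent to this Python sketch (illustrative only; the command and config paths are the ones from the task):

    import subprocess
    import time

    CMD = ["neutron-db-manage",
           "--config-file=/etc/neutron/neutron.conf",
           "--config-file=/etc/neutron/plugins/ml2/ml2_conf.ini",
           "upgrade", "head"]

    # Mirror Ansible's until/retries/delay: stop on the first rc == 0,
    # give up after five attempts spaced three seconds apart.
    for attempt in range(5):
        if subprocess.run(CMD).returncode == 0:
            break
        time.sleep(3)
    else:
        raise RuntimeError("neutron-db-manage kept failing after 5 attempts")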
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
new file mode 100644
index 000000000..616529939
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -0,0 +1,29 @@
+---
+- name: install controller-related neutron packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-server
+ - neutron-plugin-ml2
+
+- name: generate neutron control service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - neutron-server
+ - neutron-plugin-ml2
+
+- name: get tenant id to fill neutron.conf
+ shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+ register: NOVA_ADMIN_TENANT_ID
+
+- name: update neutron conf
+ template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-server
+
+- name: update ml2 plugin conf
+ template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
+ notify:
+ - restart neutron-server
+
+- meta: flush_handlers
+
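The keystone | grep | awk pipeline above scrapes the service tenant's UUID out of keystone's pretty-printed table and registers it as NOVA_ADMIN_TENANT_ID, which neutron.conf then consumes via NOVA_ADMIN_TENANT_ID.stdout_lines[0]. A Python sketch of the same field extraction (the table and UUID below are made-up sample data):

    # keystone tenant-get prints a pretty-printed table; awk '{print $4}'
    # grabs the fourth whitespace-separated field of the matching row,
    # i.e. the value between the pipes on the "id" line.
    SAMPLE_OUTPUT = """\
    +-------------+----------------------------------+
    |   Property  |              Value               |
    +-------------+----------------------------------+
    | description |          Service Tenant          |
    |   enabled   |               True               |
    |      id     | 2f12e4a7c6c44ecf8b9f1e9b39c41f8d |
    |     name    |             service              |
    +-------------+----------------------------------+
    """

    for line in SAMPLE_OUTPUT.splitlines():
        fields = line.split()
        # Row layout: "| id | <uuid> |" -> fields[1] == "id", fields[3] == uuid
        if len(fields) > 3 and fields[1] == "id":
            print(fields[3])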
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
new file mode 100644
index 000000000..19eb62ec4
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
new file mode 100644
index 000000000..7bcbd9df2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
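The single dhcp-option-force=26,1454 line above makes dnsmasq advertise DHCP option 26 (interface MTU) as 1454 bytes, leaving headroom for tunnel encapsulation on a 1500-byte physical network. A hedged sketch of the implied arithmetic (46 bytes is simply 1500 minus 1454; the real overhead depends on the tunnel type in use):

    PHYSICAL_MTU = 1500

    # Encapsulation overhead implied by the 1454 value above; GRE and
    # VXLAN add different header sizes, so treat this as deployment-specific.
    TUNNEL_OVERHEAD = 46

    print(PHYSICAL_MTU - TUNNEL_OVERHEAD)  # -> 1454, the advertised guest MTU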
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
new file mode 100644
index 000000000..32caf96dd
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
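This XORP configuration enables IGMPv3 on the internal interface so that membership in the multicast VXLAN group (vxlan_group = 239.1.1.1 in ml2_conf.ini) is propagated between nodes. As a quick host-side sanity check, a hedged Python sketch that joins the same group through the standard socket API (interface selection is left to the kernel):

    import socket
    import struct

    GROUP = "239.1.1.1"  # matches vxlan_group in ml2_conf.ini
    PORT = 4789          # matches vxlan_udp_port in ml2_conf.ini

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", PORT))

    # Joining triggers the IGMP membership report that the XORP config
    # above is there to route between the physical nodes.
    mreq = struct.pack("4s4s", socket.inet_aton(GROUP),
                       socket.inet_aton("0.0.0.0"))
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    print("joined", GROUP)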
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
new file mode 100644
index 000000000..b394c0082
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False, then the agent can only configure one
+# router.
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it to 0 or less
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
new file mode 100644
index 000000000..6badf2877
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
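As the comment above notes, the proxy signs each forwarded request's Instance-ID header with the shared secret so that the Nova metadata server can reject spoofed instance IDs. A minimal sketch of that check, assuming the HMAC-SHA256 scheme the agents use (the secret and instance id below are made-up values):

    import hashlib
    import hmac

    METADATA_SECRET = "example-shared-secret"             # made-up value
    INSTANCE_ID = "e9f2c7b4-1234-5678-9abc-def012345678"  # made-up UUID

    def sign(instance_id, secret):
        # HMAC-SHA256 over the Instance-ID header, hex-encoded.
        return hmac.new(secret.encode(), instance_id.encode(),
                        hashlib.sha256).hexdigest()

    # The proxy sends the signature; nova recomputes it and compares.
    sent = sign(INSTANCE_ID, METADATA_SECRET)
    assert hmac.compare_digest(sent, sign(INSTANCE_ID, METADATA_SECRET))
    print("signature ok:", sent)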
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
new file mode 100644
index 000000000..a7900693e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls whether the neutron security group is enabled.
+# It should be false when you use the nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
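Several values in this template come from Ansible list variables rendered through Jinja2's join filter, e.g. type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}. A small sketch of that rendering, assuming the jinja2 package and illustrative variable values (the list below is an example, not the deployment's actual setting):

    from jinja2 import Template

    # Example values only; the real lists come from the Compass/Ansible vars.
    context = {"NEUTRON_TYPE_DRIVERS": ["flat", "vlan", "gre", "vxlan"]}

    line = Template('type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}')
    print(line.render(**context))  # -> type_drivers = flat,vlan,gre,vxlan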
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
new file mode 100644
index 000000000..93be9cbc0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# where 'infinite' or a negative value means no max limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will just return
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+# nova_admin_tenant_id =
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
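This template pins api_workers and rpc_workers to 8 rather than leaving the single-process default of 0. A common sizing heuristic ties the worker count to the host's CPU count; a hedged Python sketch (the cap of 8 simply mirrors the value chosen above):

    import multiprocessing

    # Heuristic only: one worker per core, capped at the value the template
    # hard-codes; the right number depends on API load and available memory.
    def suggested_workers(cap=8):
        return min(multiprocessing.cpu_count(), cap)

    print("api_workers =", suggested_workers())
    print("rpc_workers =", suggested_workers())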
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
new file mode 100644
index 000000000..2a66e94a2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response,
+# where 'infinite' or a negative value means no max limit; otherwise the
+# value must be greater than 0. If the number of items requested is greater
+# than pagination_max_limit, the server will just return
+# pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
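+# (A hedged note: NOVA_ADMIN_TENANT_ID is assumed to be an Ansible variable
+# registered from an earlier keystone lookup; stdout_lines[0] selects the
+# first line of that command's output.)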
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
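+# For example, to bypass the rootwrap filters entirely (a debugging sketch,
+# not a recommendation for production):
+# root_helper = sudo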
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half of agent_down_time or less.
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# a value of -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# The combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
new file mode 100644
index 000000000..b92e202f2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
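+# A hedged usage sketch (same credentials assumed): after running the two
+# commands above, the created network and subnet can be listed with:
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-list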
+
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
new file mode 100644
index 000000000..95870732b
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal=false
+vif_plugging_timeout=0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
new file mode 100644
index 000000000..d6c5cc8f1
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
@@ -0,0 +1,21 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: kill dnsmasq
+ command: killall dnsmasq
+ ignore_errors: True
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
+
+- name: restart xorp
+ service: name=xorp state=restarted enabled=yes sleep=10
+ ignore_errors: True
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
new file mode 100644
index 000000000..d6f38a0e0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
@@ -0,0 +1,20 @@
+---
+- name: Install XORP to provide IGMP router functionality
+ apt: pkg=xorp
+
+- name: configure xorp
+ template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
+ notify:
+ - restart xorp
+
+- name: set xorp defaults
+ lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
+ notify:
+ - restart xorp
+
+- meta: flush_handlers
+
+- name: start and enable xorp service
+ service: name=xorp state=started enabled=yes
+ retries: 2
+ delay: 10
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
new file mode 100644
index 000000000..1d4b591c0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
@@ -0,0 +1,114 @@
+---
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
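+# (Hedged note: reverse-path filtering is disabled above because the network
+# node forwards traffic for subnets it does not own; a strict rp_filter would
+# drop such asymmetric traffic.)
+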
+- name: install neutron network related packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+
+- name: generate neutron service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-plugin-openvswitch-agent
+ - neutron-metadata-agent
+ - xorp
+
+- name: install neutron openvswitch agent
+ apt: name=neutron-plugin-openvswitch-agent
+ state=present force=yes
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+ template: src=neutron-network.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+ - restart neutron-l3-agent
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+ - restart neutron-metadata-agent
+
+- name: config l3 agent
+ template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
+ backup=yes
+ notify:
+ - restart neutron-l3-agent
+
+- name: config dhcp agent
+ template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
+ backup=yes
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: update dnsmasq-neutron.conf
+ template: src=dnsmasq-neutron.conf
+ dest=/etc/neutron/dnsmasq-neutron.conf
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: config metadata agent
+ template: src=metadata_agent.ini
+ dest=/etc/neutron/metadata_agent.ini backup=yes
+ notify:
+ - restart neutron-metadata-agent
+
+- name: config ml2 plugin
+ template: src=ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- meta: flush_handlers
+
+- name: add br-int
+ openvswitch_bridge: bridge=br-int state=present
+
+- name: add br-ex
+ openvswitch_bridge: bridge=br-ex state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: assign a port to br-ex for physical ext interface
+ openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
+ state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- include: igmp-router.yml
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert kernel support for vxlan
+ command: modinfo -F version vxlan
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+ command: ip link add type vxlan help
+ register: iproute_out
+ failed_when: iproute_out.rc == 255
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart ovs service
+ service: name=openvswitch-switch state=restarted enabled=yes
+
+- meta: flush_handlers
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
new file mode 100644
index 000000000..a2b449ce5
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
@@ -0,0 +1,13 @@
+---
+- name: ovs set manager
+ command: ovs-vsctl set-manager tcp:{{ controller }}:6640
+
+- name: get ovs uuid
+ shell: ovs-vsctl get Open_vSwitch . _uuid
+ register: ovs_uuid
+
+- name: set bridge_mappings
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+
+- name: set local ip
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
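+
+# A minimal verification sketch (an assumption, not part of this role): the
+# values written above can be inspected on the host with:
+# - name: show ovs configuration
+#   command: ovs-vsctl list Open_vSwitch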
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
new file mode 100644
index 000000000..19eb62ec4
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# requests. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
new file mode 100644
index 000000000..7bcbd9df2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
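+# DHCP option 26 forces the instance interface MTU; 1454 leaves headroom for
+# tunnel (GRE/VXLAN) encapsulation overhead on a standard 1500-byte underlay.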
+dhcp-option-force=26,1454
+
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
new file mode 100644
index 000000000..32caf96dd
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
new file mode 100644
index 000000000..b394c0082
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and an
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router, selected by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of the bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3
+# agent can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# Seconds between re-syncs of routers' data, if needed
+periodic_interval = 40
+
+# Seconds to wait after starting the agent before syncing routers' data
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
new file mode 100644
index 000000000..6badf2877
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
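+# A matching sketch for the Nova side (assuming Juno-era option names in the
+# [DEFAULT] section of nova.conf):
+# service_neutron_metadata_proxy = True
+# neutron_metadata_proxy_shared_secret = {{ METADATA_SECRET }}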
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
new file mode 100644
index 000000000..a7900693e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
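+# For illustration: with NEUTRON_TYPE_DRIVERS = ['flat', 'vlan', 'vxlan'] the
+# line above renders as: type_drivers = flat,vlan,vxlan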
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls whether the neutron security group is enabled or not.
+# It should be false when you use the nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
new file mode 100644
index 000000000..93be9cbc0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone' (default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If more items are requested than pagination_max_limit,
+# the server returns only pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to a DHCP agent. Non-hosted networks will
+# be scheduled to the first DHCP agent that sends a get_active_networks
+# message to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to an L3 agent. Non-hosted routers will be
+# scheduled to the first L3 agent that sends a sync_routers message to the
+# neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half of agent_down_time or less.
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# a value of -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# The combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
new file mode 100644
index 000000000..15753675e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone' (default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If more items are requested than pagination_max_limit,
+# the server returns only pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to the DHCP agent. Non-hosted networks
+# will be scheduled to the first DHCP agent that sends a get_active_networks
+# message to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to the L3 agent. Non-hosted routers will
+# be scheduled to the first L3 agent that sends a sync_routers message to
+# the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
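+# (8 API and 8 RPC workers is this deployment's tuning; a common rule of
+# thumb is one worker per CPU core.)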
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
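+# (NOVA_ADMIN_TENANT_ID is presumably a task result registered earlier in
+# the play; stdout_lines[0] takes the first line of that command's output.)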
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# in OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer in OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
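+# (Both connection lines are left commented here, presumably because the
+# network-node agents reach neutron-server over RPC rather than opening the
+# database directly.)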
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# setting this to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's LBaaS driver, uncomment the line below.
+# To keep HAProxy as the default LBaaS driver, remove the ':default'
+# attribute from that line; otherwise comment out the HAProxy line above.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
new file mode 100644
index 000000000..b92e202f2
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
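+# Rendered with hypothetical example values, the subnet-create call above
+# would look like:
+# neutron subnet-create ext-net --name ext-subnet \
+#   --allocation-pool start=203.0.113.101,end=203.0.113.200 \
+#   --disable-dhcp --gateway 203.0.113.1 203.0.113.0/24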
+
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
new file mode 100644
index 000000000..95870732b
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
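+# (With vif_plugging_is_fatal = false and a timeout of 0, nova-compute
+# neither waits for nor fails on Neutron's network-vif-plugged event,
+# which avoids boot failures when Neutron event callbacks are not set up.)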
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
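+# (notification_driver is a multi-valued option; listing it twice loads
+# both the RPC notifier and the Ceilometer compute notifier.)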
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
new file mode 100644
index 000000000..c1350030f
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart nova-compute
+ service: name=nova-compute state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
new file mode 100644
index 000000000..51c8dfaa3
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+- name: install nova-compute related packages
+ apt: name=nova-compute-kvm state=present force=yes
+
+- name: update nova-compute conf
+ template: src={{ item }} dest=/etc/nova/{{ item }}
+ with_items:
+ - nova.conf
+ - nova-compute.conf
+ notify:
+ - restart nova-compute
+
+- name: generate nova compute service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - nova-compute
+
+- meta: flush_handlers
+
+- name: remove nova sqlite db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
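+  # The '|| touch' fallback keeps this task idempotent: once the sqlite
+  # file is gone, reruns create the marker file instead of failing.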
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
new file mode 100644
index 000000000..401dee791
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+compute_driver=libvirt.LibvirtDriver
+force_raw_images = true
+[libvirt]
+virt_type=qemu
+images_type = raw
+mem_stats_period_seconds=0
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
new file mode 100644
index 000000000..4988cb036
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
new file mode 100644
index 000000000..b4c1585d3
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+ service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+ service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+ service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+ service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+ service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+ service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+ service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
new file mode 100644
index 000000000..72a9f4d46
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: nova_install.yml
+ tags:
+ - install
+ - nova_install
+ - nova
+
+- include: nova_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
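+  # (nova_config runs only where HA_CLUSTER is undefined or this host's
+  # entry is empty, presumably so 'nova-manage db sync' executes on a
+  # single node rather than on every cluster member.)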
+ tags:
+ - config
+ - nova_config
+ - nova
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
new file mode 100644
index 000000000..62351faa9
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -0,0 +1,16 @@
+---
+- name: nova db sync
+ command: su -s /bin/sh -c "nova-manage db sync" nova
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
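+  # (register/until retries the db sync up to 5 times, 3 seconds apart,
+  # to ride out a database that is still coming up.)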
+ notify:
+ - restart nova-api
+ - restart nova-cert
+ - restart nova-consoleauth
+ - restart nova-scheduler
+ - restart nova-conductor
+ - restart nova-novncproxy
+
+- meta: flush_handlers
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
new file mode 100644
index 000000000..a1cded568
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
@@ -0,0 +1,35 @@
+---
+- name: install nova related packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+ - python-novaclient
+ - python-oslo.rootwrap
+
+- name: generate nova control service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+
+- name: update nova conf
+  template: src=nova.conf dest=/etc/nova/nova.conf backup=yes
+ notify:
+ - restart nova-api
+ - restart nova-cert
+ - restart nova-consoleauth
+ - restart nova-scheduler
+ - restart nova-conductor
+ - restart nova-novncproxy
+ - remove nova-sqlite-db
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
new file mode 100644
index 000000000..19eb62ec4
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
new file mode 100644
index 000000000..7bcbd9df2
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
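+# (DHCP option 26 is the interface MTU; forcing 1454 bytes leaves room for
+# GRE tunnel overhead on a standard 1500-byte network.)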
+
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
new file mode 100644
index 000000000..32caf96dd
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
new file mode 100644
index 000000000..b394c0082
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple
+# external networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3
+# agent can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it to 0 or less
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
new file mode 100644
index 000000000..6badf2877
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
new file mode 100644
index 000000000..a7900693e
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example: flat_networks = physnet1,physnet2
+# Example: flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
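+# (The Jinja guard above emits the OVS integration/tunnel settings only
+# when the openvswitch mechanism driver is selected for this deployment.)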
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
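+# (Hard-coded ODL endpoint; the format appears to be
+# <ip>:<port>:<user>:<password>.)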
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
new file mode 100644
index 000000000..93be9cbc0
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# RabbitMQ virtual host to use.
+# rabbit_virtual_host = /
+# Maximum number of retries when trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe the RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If more items are requested than pagination_max_limit,
+# the server returns only pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to the DHCP agent. Non-hosted networks
+# will be scheduled to the first DHCP agent that sends a get_active_networks
+# message to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to the L3 agent. Non-hosted routers will
+# be scheduled to the first L3 agent that sends a sync_routers message to
+# the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# in OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer in OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# setting this to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's LBaaS driver, uncomment the line below.
+# To keep HAProxy as the default LBaaS driver, remove the ':default'
+# attribute from that line; otherwise comment out the HAProxy line above.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
new file mode 100644
index 000000000..15753675e
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
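
The base_mac comments above describe the generation scheme: the first three octets are always preserved, the fourth is preserved only when it is not 00, and the rest are randomized, with a bounded number of retries to find an unused address. A minimal Python sketch of that documented behavior (illustrative only, not Neutron's actual implementation):

    import random

    def generate_mac(base_mac="fa:16:3e:00:00:00", retries=16, used=None):
        # First three octets are always kept; the fourth only if non-zero.
        used = used if used is not None else set()
        octets = base_mac.split(":")
        fixed = 4 if octets[3] != "00" else 3
        for _ in range(retries):
            mac = ":".join(octets[:fixed] +
                           ["%02x" % random.randint(0, 255)
                            for _ in range(6 - fixed)])
            if mac not in used:
                return mac
        raise RuntimeError("no unique MAC after %d retries" % retries)
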
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier; can be a comma-separated list of values.
+# The actual topic names are formed as <topic>.<notification level in lower
+# case>, e.g. notifications.info
+notification_topics = notifications
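
Assuming the usual oslo notifier behavior the comment above alludes to, each configured topic is suffixed with the lowercased notification level; a quick Python illustration:

    level = "INFO"                       # default_notification_level above
    topics = "notifications".split(",")  # notification_topics above
    print(["%s.%s" % (t, level.lower()) for t in topics])
    # ['notifications.info']
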
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server returns at most pagination_max_limit items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of networks to a DHCP agent. Non-hosted networks are
+# scheduled to the first DHCP agent that sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling of routers to an L3 agent. Non-hosted routers are
+# scheduled to the first L3 agent that sends a sync_routers message to the
+# neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# Seconds between nodes reporting state to the server; should be less than
+# agent_down_time, ideally half or less of agent_down_time (see the sketch
+# after this section)
+report_interval = 30
+
+# =========== end of items for agent management extension =====
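
A quick sanity check of the relationship spelled out above, using the values set in this template (agent_down_time = 75 in [DEFAULT], report_interval = 30 here):

    agent_down_time = 75
    report_interval = 30
    # guidance: report_interval should be at most half of agent_down_time
    assert report_interval <= agent_down_time / 2.0
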
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's lbaas driver, uncomment the next line.
+# To keep HA Proxy as the default lbaas driver, remove the ':default'
+# attribute from the line below; otherwise, comment out the HA Proxy line.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
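
A minimal Python sketch of the service_provider line format documented at the top of this section (a hypothetical helper, not part of Neutron; it assumes the driver path itself contains no colons):

    def parse_service_provider(line):
        # service_provider=<service_type>:<name>:<driver>[:default]
        value = line.split("=", 1)[1].strip()
        parts = value.split(":")
        is_default = parts[-1] == "default"
        if is_default:
            parts = parts[:-1]
        service_type, name, driver = parts[0], parts[1], ":".join(parts[2:])
        return service_type, name, driver, is_default

    print(parse_service_provider(
        "service_provider=VPN:openswan:"
        "neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default"))
    # ('VPN', 'openswan', '<driver path>', True)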
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
new file mode 100644
index 000000000..b92e202f2
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
new file mode 100644
index 000000000..c8991a302
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
@@ -0,0 +1,72 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=osapi_compute,metadata
+
+vif_plugging_is_fatal: false
+vif_plugging_timeout: 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+osapi_compute_listen={{ internal_ip }}
+metadata_listen={{ internal_ip }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/repo/tasks/main.yml b/compass/deploy/ansible/roles/repo/tasks/main.yml
new file mode 100644
index 000000000..9476f80af
--- /dev/null
+++ b/compass/deploy/ansible/roles/repo/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: add juno cloudarchive
+ apt_repository: repo="{{ juno_cloud_archive }}" state=present
+
+- name: first update pkgs
+ apt: update_cache=yes
diff --git a/compass/deploy/ansible/roles/repo/templates/sources.list b/compass/deploy/ansible/roles/repo/templates/sources.list
new file mode 100644
index 000000000..8b062e749
--- /dev/null
+++ b/compass/deploy/ansible/roles/repo/templates/sources.list
@@ -0,0 +1 @@
+{{ LOCAL_REPO }}
diff --git a/compass/deploy/compass_vm.sh b/compass/deploy/compass_vm.sh
new file mode 100644
index 000000000..07649175e
--- /dev/null
+++ b/compass/deploy/compass_vm.sh
@@ -0,0 +1,103 @@
+compass_vm_dir=$WORK_DIR/vm/compass
+rsa_file=$compass_vm_dir/boot.rsa
+ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
+function tear_down_compass() {
+ sudo virsh destroy compass > /dev/null 2>&1
+ sudo virsh undefine compass > /dev/null 2>&1
+
+ sudo umount $compass_vm_dir/old > /dev/null 2>&1
+ sudo umount $compass_vm_dir/new > /dev/null 2>&1
+
+ sudo rm -rf $compass_vm_dir
+
+ log_info "tear_down_compass success!!!"
+}
+
+function install_compass_core() {
+ local inventory_file=$compass_vm_dir/inventory.file
+ log_info "install_compass_core enter"
+ sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" $WORK_DIR/installer/compass-install/install/group_vars/all
+ echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > $inventory_file
+ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml
+ exit_status=$?
+ rm $inventory_file
+ log_info "install_compass_core exit"
+    if [[ $exit_status != 0 ]];then
+        return 1
+    fi
+}
+
+function wait_ok() {
+ log_info "wait_compass_ok enter"
+ retry=0
+ until timeout 1s ssh $ssh_args root@$MGMT_IP "exit" 2>/dev/null
+ do
+ log_progress "os install time used: $((retry*100/$1))%"
+ sleep 1
+ let retry+=1
+ if [[ $retry -ge $1 ]];then
+ log_error "os install time out"
+ tear_down_compass
+ exit 1
+ fi
+ done
+
+ log_warn "os install time used: 100%"
+ log_info "wait_compass_ok exit"
+}
+
+function launch_compass() {
+ local old_mnt=$compass_vm_dir/old
+ local new_mnt=$compass_vm_dir/new
+ local old_iso=$WORK_DIR/iso/centos.iso
+ local new_iso=$compass_vm_dir/centos.iso
+
+ log_info "launch_compass enter"
+ tear_down_compass
+
+ set -e
+ mkdir -p $compass_vm_dir $old_mnt
+ sudo mount -o loop $old_iso $old_mnt
+ cd $old_mnt;find .|cpio -pd $new_mnt;cd -
+
+ sudo umount $old_mnt
+
+ chmod 755 -R $new_mnt
+ sed -i -e "s/REPLACE_MGMT_IP/$MGMT_IP/g" -e "s/REPLACE_MGMT_NETMASK/$MGMT_MASK/g" -e "s/REPLACE_INSTALL_IP/$COMPASS_SERVER/g" -e "s/REPLACE_INSTALL_NETMASK/$INSTALL_MASK/g" -e "s/REPLACE_GW/$MGMT_GW/g" $new_mnt/isolinux/isolinux.cfg
+
+ sudo ssh-keygen -f $new_mnt/bootstrap/boot.rsa -t rsa -N ''
+ cp $new_mnt/bootstrap/boot.rsa $rsa_file
+
+ rm -rf $new_mnt/.rr_moved $new_mnt/rr_moved
+ sudo mkisofs -quiet -r -J -R -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -hide-rr-moved -x "lost+found:" -o $new_iso $new_mnt
+
+ rm -rf $old_mnt $new_mnt
+
+ qemu-img create -f qcow2 $compass_vm_dir/disk.img 100G
+
+ # create vm xml
+ sed -e "s/REPLACE_MEM/$COMPASS_VIRT_MEM/g" \
+ -e "s/REPLACE_CPU/$COMPASS_VIRT_CPUS/g" \
+ -e "s#REPLACE_IMAGE#$compass_vm_dir/disk.img#g" \
+ -e "s#REPLACE_ISO#$compass_vm_dir/centos.iso#g" \
+ -e "s/REPLACE_NET_MGMT/mgmt/g" \
+ -e "s/REPLACE_BRIDGE_INSTALL/br_install/g" \
+ $COMPASS_DIR/deploy/template/vm/compass.xml \
+ > $WORK_DIR/vm/compass/libvirt.xml
+
+ sudo virsh define $compass_vm_dir/libvirt.xml
+ sudo virsh start compass
+
+ if ! wait_ok 300;then
+ log_error "install os timeout"
+ exit 1
+ fi
+
+ if ! install_compass_core;then
+ log_error "install compass core failed"
+ exit 1
+ fi
+
+ set +e
+ log_info "launch_compass exit"
+}
diff --git a/compass/deploy/conf/baremetal.conf b/compass/deploy/conf/baremetal.conf
new file mode 100644
index 000000000..317d56178
--- /dev/null
+++ b/compass/deploy/conf/baremetal.conf
@@ -0,0 +1,20 @@
+export VIRT_CPUS=4
+export HOST_MACS="'64:3e:8c:4c:6d:a3' '64:3e:8c:4c:6d:37' '64:3e:8c:4c:6c:d7' '64:3e:8c:4c:6b:7b' '64:3e:8c:4c:68:2b'"
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="172.29.1.166"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
+export VIP="10.1.0.222"
diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf
index 8362b9a61..d8e8d5125 100644
--- a/compass/deploy/conf/base.conf
+++ b/compass/deploy/conf/base.conf
@@ -1,11 +1,28 @@
-export COMPASS_SERVER_URL="http://10.1.0.12/api"
+export ISO_URL=http://192.168.123.11:9999/xh/work/build/work/compass.iso
+export INSTALL_IP=${INSTALL_IP:-10.1.0.12}
+export INSTALL_MASK=${INSTALL_MASK:-255.255.255.0}
+export INSTALL_GW=${INSTALL_GW:-10.1.0.1}
+export INSTALL_IP_START=${INSTALL_IP_START:-10.1.0.1}
+export INSTALL_IP_END=${INSTALL_IP_END:-10.1.0.254}
+export MGMT_IP=${MGMT_IP:-192.168.200.2}
+export MGMT_MASK=${MGMT_MASK:-255.255.252.0}
+export MGMT_GW=${MGMT_GW:-192.168.200.1}
+export MGMT_IP_START=${MGMT_IP_START:-192.168.200.3}
+export MGMT_IP_END=${MGMT_IP_END:-192.168.200.254}
+export OM_NIC=${OM_NIC:-eth3}
+export OM_IP=${OM_IP:-192.168.123.11/22}
+export OM_GW=${OM_GW:-192.168.120.1}
+export COMPASS_VIRT_CPUS=4
+export COMPASS_VIRT_MEM=4096
+export COMPASS_SERVER=$INSTALL_IP
+export COMPASS_SERVER_URL="http://$COMPASS_SERVER/api"
export COMPASS_USER_EMAIL="admin@huawei.com"
export COMPASS_USER_PASSWORD="admin"
export CLUSTER_NAME="opnfv2"
export LANGUAGE="EN"
export TIMEZONE="America/Los_Angeles"
-export NTP_SERVER="10.1.0.12"
-export NAMESERVERS="10.1.0.12"
+export NTP_SERVER="$COMPASS_SERVER"
+export NAMESERVERS="$COMPASS_SERVER"
export DOMAIN="ods.com"
export PARTITIONS="/home=5%,/tmp=5%,/var=20%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
@@ -18,6 +35,7 @@ export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
+
function next_ip {
ip_addr=$1
ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')"
diff --git a/compass/deploy/conf/cluster.conf b/compass/deploy/conf/cluster.conf
new file mode 100644
index 000000000..4f43027eb
--- /dev/null
+++ b/compass/deploy/conf/cluster.conf
@@ -0,0 +1,20 @@
+export VIRT_NUMBER=5
+export VIRT_CPUS=4
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="1.1.1.1"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
+export VIP="10.1.0.222"
diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf
index e63e514b2..f32411bcf 100644
--- a/compass/deploy/conf/five.conf
+++ b/compass/deploy/conf/five.conf
@@ -1,6 +1,6 @@
export VIRT_NUMBER=5
export VIRT_CPUS=4
-export VIRT_MEM=4096
+export VIRT_MEM=16384
export VIRT_DISK=30G
export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
@@ -12,7 +12,7 @@ export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,
export DEFAULT_ROLES=""
export SWITCH_IPS="1.1.1.1"
export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="90"
+export DEPLOYMENT_TIMEOUT="150"
export POLL_SWITCHES_FLAG="nopoll_switches"
export DASHBOARD_URL=""
export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh
index 18857cd82..41ef2098d 100644
--- a/compass/deploy/deploy-vm.sh
+++ b/compass/deploy/deploy-vm.sh
@@ -16,6 +16,12 @@ fi
cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
#source ../compass-install/ci/allinone.conf
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+ "ssh root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks" vagrant
+
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+ "scp -r ${SCRIPT_DIR}/../deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py" \
+ vagrant
bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
@@ -32,7 +38,8 @@ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_
--network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
--machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
---deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
+--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
+--cluster_vip="${VIP}"
deploy_result=$?
tear_down_machines
cd ../compass-install
diff --git a/compass/deploy/deploy_host.sh b/compass/deploy/deploy_host.sh
new file mode 100644
index 000000000..d08a82183
--- /dev/null
+++ b/compass/deploy/deploy_host.sh
@@ -0,0 +1,40 @@
+function deploy_host(){
+ cd $WORK_DIR/installer/compass-core
+ source $WORK_DIR/venv/bin/activate
+ if pip --help | grep -q trusted; then
+ pip install -i http://pypi.douban.com/simple -e . --trusted-host pypi.douban.com
+ else
+ pip install -i http://pypi.douban.com/simple -e .
+ fi
+
+ sudo mkdir -p /var/log/compass
+    sudo chmod -R 777 /var/log/compass
+
+ sudo mkdir -p /etc/compass
+ sudo cp -rf conf/setting /etc/compass/.
+
+ cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
+ sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
+ ssh $ssh_args root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks
+ scp $ssh_args -r ${COMPASS_DIR}/deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py
+
+ (sleep 15;reboot_hosts ) &
+ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
+ --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
+ --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
+ --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
+ --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \
+ --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \
+ --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \
+ --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \
+ --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \
+ --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \
+ --server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \
+ --os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \
+ --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
+ --network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
+ --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
+ --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
+ --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
+ --cluster_vip="${VIP}"
+}
diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh
index 29c2c23fe..49ea94780 100644..100755
--- a/compass/deploy/func.sh
+++ b/compass/deploy/func.sh
@@ -1,19 +1,22 @@
function tear_down_machines() {
- virtmachines=$(virsh list --name |grep pxe)
+ virtmachines=$(sudo virsh list --name |grep pxe)
for virtmachine in $virtmachines; do
echo "destroy $virtmachine"
- virsh destroy $virtmachine
+ sudo virsh destroy $virtmachine
if [[ "$?" != "0" ]]; then
echo "destroy instance $virtmachine failed"
exit 1
fi
done
- virtmachines=$(virsh list --all --name |grep pxe)
- for virtmachine in $virtmachines; do
- echo "undefine $virtmachine"
- virsh undefine $virtmachine
+
+ sudo virsh list --all|grep shut|awk '{print $2}'|xargs -n 1 sudo virsh undefine
+
+    vol_names=$(sudo virsh vol-list default |grep '\.img' | awk '{print $1}')
+ for vol_name in $vol_names; do
+ echo "virsh vol-delete $vol_name"
+ sudo virsh vol-delete $vol_name --pool default
if [[ "$?" != "0" ]]; then
- echo "undefine instance $virtmachine failed"
+ echo "vol-delete $vol_name failed!"
exit 1
fi
done
diff --git a/compass/deploy/host_baremetal.sh b/compass/deploy/host_baremetal.sh
new file mode 100644
index 000000000..26238e04e
--- /dev/null
+++ b/compass/deploy/host_baremetal.sh
@@ -0,0 +1,9 @@
+function get_host_macs() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+ local machines=`echo $HOST_MACS|sed 's/ /,/g'`
+
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ echo $machines
+}
diff --git a/compass/deploy/host_vm.sh b/compass/deploy/host_vm.sh
new file mode 100644
index 000000000..cf9a75742
--- /dev/null
+++ b/compass/deploy/host_vm.sh
@@ -0,0 +1,59 @@
+host_vm_dir=$WORK_DIR/vm
+function tear_down_machines() {
+ for i in host{0..4}
+ do
+ sudo virsh destroy $i 1>/dev/null 2>/dev/null
+ sudo virsh undefine $i 1>/dev/null 2>/dev/null
+        rm -rf $host_vm_dir/$i
+ done
+}
+
+function reboot_hosts() {
+ log_warn "reboot_hosts do nothing"
+}
+
+function launch_host_vms() {
+ tear_down_machines
+    # function body
+ mac_array=`echo $machines|sed 's/,/ /g'`
+ log_info "bringing up pxe boot vms"
+ i=0
+ for mac in $mac_array; do
+ log_info "creating vm disk for instance host${i}"
+ vm_dir=$host_vm_dir/host$i
+ mkdir -p $vm_dir
+ sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
+ # create vm xml
+ sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
+ -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
+ -e "s/REPLACE_NAME/host$i/g" \
+ -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
+ -e "s/REPLACE_BOOT_MAC/$mac/g" \
+ -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
+ -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
+ $COMPASS_DIR/deploy/template/vm/host.xml\
+ > $vm_dir/libvirt.xml
+
+ sudo virsh define $vm_dir/libvirt.xml
+ sudo virsh start host$i
+ let i=i+1
+ done
+}
+
+function get_host_macs() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+ local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
+ local machines=
+
+ chmod +x $mac_generator
+ mac_array=`$mac_generator $VIRT_NUMBER`
+ machines=`echo $mac_array|sed 's/ /,/g'`
+
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ echo $machines
+}
+
diff --git a/compass/deploy/network.sh b/compass/deploy/network.sh
new file mode 100755
index 000000000..c60607e8e
--- /dev/null
+++ b/compass/deploy/network.sh
@@ -0,0 +1,70 @@
+function destroy_nets() {
+ sudo virsh net-destroy mgmt > /dev/null 2>&1
+ sudo virsh net-undefine mgmt > /dev/null 2>&1
+
+ sudo virsh net-destroy install > /dev/null 2>&1
+ sudo virsh net-undefine install > /dev/null 2>&1
+ rm -rf $COMPASS_DIR/deploy/work/network/*.xml
+}
+
+function setup_om_bridge() {
+ local device=$1
+ local gw=$2
+ ip link set br_install down
+ ip addr flush $device
+ brctl delbr br_install
+
+ brctl addbr br_install
+ brctl addif br_install $device
+ ip link set br_install up
+
+ shift;shift
+ for ip in $*;do
+ ip addr add $ip dev br_install
+ done
+
+ route add default gw $gw
+}
+
+function setup_om_nat() {
+ # create install network
+ sed -e "s/REPLACE_BRIDGE/br_install/g" \
+ -e "s/REPLACE_NAME/install/g" \
+ -e "s/REPLACE_GATEWAY/$INSTALL_GW/g" \
+ -e "s/REPLACE_MASK/$INSTALL_MASK/g" \
+ -e "s/REPLACE_START/$INSTALL_IP_START/g" \
+ -e "s/REPLACE_END/$INSTALL_IP_END/g" \
+ $COMPASS_DIR/deploy/template/network/nat.xml \
+ > $WORK_DIR/network/install.xml
+
+ sudo virsh net-define $WORK_DIR/network/install.xml
+ sudo virsh net-start install
+}
+
+function create_nets() {
+ destroy_nets
+
+ # create mgmt network
+ sed -e "s/REPLACE_BRIDGE/br_mgmt/g" \
+ -e "s/REPLACE_NAME/mgmt/g" \
+ -e "s/REPLACE_GATEWAY/$MGMT_GW/g" \
+ -e "s/REPLACE_MASK/$MGMT_MASK/g" \
+ -e "s/REPLACE_START/$MGMT_IP_START/g" \
+ -e "s/REPLACE_END/$MGMT_IP_END/g" \
+ $COMPASS_DIR/deploy/template/network/nat.xml \
+ > $WORK_DIR/network/mgmt.xml
+
+ sudo virsh net-define $WORK_DIR/network/mgmt.xml
+ sudo virsh net-start mgmt
+
+ # create install network
+ if [[ ! -z $VIRT_NUMBER ]];then
+ setup_om_nat
+ else
+ mask=`echo $INSTALL_MASK | awk -F'.' '{print ($1*(2^24)+$2*(2^16)+$3*(2^8)+$4)}'`
+ mask_len=`echo "obase=2;${mask}"|bc|awk -F'0' '{print length($1)}'`
+ setup_om_bridge $OM_NIC $OM_GW $INSTALL_GW/$mask_len $OM_IP
+ fi
+
+}
+
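
The awk/bc pipeline in create_nets above turns a dotted netmask into a CIDR prefix length by counting the leading one-bits of the 32-bit mask. A Python equivalent, assuming a contiguous netmask just as the pipeline does:

    def mask_len(netmask):
        # e.g. 255.255.255.0 -> 0xffffff00 -> 24 one-bits
        a, b, c, d = (int(x) for x in netmask.split("."))
        return bin((a << 24) | (b << 16) | (c << 8) | d).count("1")

    print(mask_len("255.255.255.0"))  # 24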
diff --git a/compass/deploy/prepare.sh b/compass/deploy/prepare.sh
index 2086c5dc4..9ec15f8ba 100644
--- a/compass/deploy/prepare.sh
+++ b/compass/deploy/prepare.sh
@@ -1,35 +1,35 @@
-sudo apt-get update -y
-sudo apt-get install git python-pip python-dev -y
-vagrant --version
-if [[ $? != 0 ]]; then
- vagrant_pkg_url=https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb
- wget ${vagrant_pkg_url}
- sudo dpkg -i $(basename ${vagrant_pkg_url})
-else
- echo "vagrant is already installed"
-fi
-sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
-sudo service libvirt-bin restart
+function prepare_env() {
+ export PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages
+ sudo apt-get update -y
+    sudo apt-get install -y mkisofs bc
+ sudo apt-get install git python-pip python-dev -y
+ sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
+ sudo pip install --upgrade pip
+ sudo pip install --upgrade ansible
+ sudo pip install --upgrade virtualenv
+ sudo service libvirt-bin restart
+
+ # prepare work dir
+ sudo rm -rf $WORK_DIR
+ mkdir -p $WORK_DIR
+ mkdir -p $WORK_DIR/installer
+ mkdir -p $WORK_DIR/vm
+ mkdir -p $WORK_DIR/network
+ mkdir -p $WORK_DIR/iso
+ mkdir -p $WORK_DIR/venv
-for plugin in vagrant-libvirt vagrant-mutate; do
- vagrant plugin list |grep $plugin
- if [[ $? != 0 ]]; then
- vagrant plugin install $plugin --plugin-source https://ruby.taobao.org
- else
- echo "$plugin plugin is already installed"
+    if [[ ! -f $WORK_DIR/iso/centos.iso ]];then
+ wget -O $WORK_DIR/iso/centos.iso $ISO_URL
fi
-done
-sudo pip install --upgrade ansible virtualenv
-#precise_box_vb_url=https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box
-#precise_box_vb_filename=$(basename ${precise_box_vb_url})
-centos65_box_vb_url=https://developer.nrel.gov/downloads/vagrant-boxes/CentOS-6.5-x86_64-v20140504.box
-centos65_box_vb_filename=$(basename ${centos65_box_vb_url})
-#wget ${precise_box_vb_url}
-vagrant box list |grep centos65
-if [[ $? != 0 ]]; then
- wget ${centos65_box_vb_url}
- mv ${centos65_box_vb_filename} centos65.box
- vagrant mutate centos65.box libvirt
-else
- echo "centos65 box already exists"
-fi
+
+ # copy compass
+ mkdir -p $WORK_DIR/mnt
+ sudo mount -o loop $WORK_DIR/iso/centos.iso $WORK_DIR/mnt
+ cp -rf $WORK_DIR/mnt/compass/compass-core $WORK_DIR/installer/
+ cp -rf $WORK_DIR/mnt/compass/compass-install $WORK_DIR/installer/
+ sudo umount $WORK_DIR/mnt
+ rm -rf $WORK_DIR/mnt
+
+ chmod 755 $WORK_DIR -R
+ virtualenv $WORK_DIR/venv
+}
diff --git a/compass/deploy/remote_excute.exp b/compass/deploy/remote_excute.exp
new file mode 100644
index 000000000..9dd112b31
--- /dev/null
+++ b/compass/deploy/remote_excute.exp
@@ -0,0 +1,23 @@
+#!/usr/bin/expect
+
+set command [lindex $argv 0]
+set passwd [lindex $argv 1]
+
+eval spawn "$command"
+set timeout 60
+
+expect {
+ -re ".*es.*o.*"
+ {
+ exp_send "yes\r"
+ exp_continue
+ }
+
+ -re ".*sword:" {
+ exp_send "$passwd\r"
+
+ }
+
+}
+
+interact
diff --git a/compass/deploy/status_callback.py b/compass/deploy/status_callback.py
new file mode 100644
index 000000000..861913222
--- /dev/null
+++ b/compass/deploy/status_callback.py
@@ -0,0 +1,174 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import httplib
+import json
+import sys
+import logging
+
+def task_error(host, data):
+ logging.info("task_error: host=%s,data=%s" % (host, data))
+
+ if type(data) == dict:
+ invocation = data.pop('invocation', {})
+
+ notify_host("localhost", host, "failed")
+
+class CallbackModule(object):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ task_error(host, res)
+
+ def runner_on_ok(self, host, res):
+ pass
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def runner_on_async_failed(self, host, res, jid):
+ task_error(host, res)
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ logging.info("playbook_on_stats enter")
+ hosts = sorted(stats.processed.keys())
+ host_vars = self.playbook.inventory.get_variables(hosts[0])
+ cluster_name = host_vars['cluster_name']
+ failures = False
+ unreachable = False
+
+ for host in hosts:
+ summary = stats.summarize(host)
+
+ if summary['failures'] > 0:
+ failures = True
+ if summary['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ for host in hosts:
+ notify_host("localhost", host, "error")
+ return
+
+ for host in hosts:
+ clusterhost_name = host + "." + cluster_name
+ notify_host("localhost", clusterhost_name, "succ")
+
+
+def raise_for_status(resp):
+    if resp.status < 200 or resp.status >= 300:
+ raise RuntimeError("%s, %s, %s" % (resp.status, resp.reason, resp.read()))
+
+def auth(conn):
+ credential = {}
+ credential['email'] = "admin@huawei.com"
+ credential['password'] = "admin"
+ url = "/api/users/token"
+ headers = {"Content-type": "application/json",
+ "Accept": "*/*"}
+ conn.request("POST", url, json.dumps(credential), headers)
+ resp = conn.getresponse()
+
+ raise_for_status(resp)
+ return json.loads(resp.read())["token"]
+
+def notify_host(compass_host, host, status):
+ if status == "succ":
+ body = {"ready": True}
+ url = "/api/clusterhosts/%s/state_internal" % host
+ elif status == "error":
+ body = {"state": "ERROR"}
+ host = host.strip("host")
+ url = "/api/clusterhosts/%s/state" % host
+ else:
+ logging.error("notify_host: host %s with status %s is not supported" \
+ % (host, status))
+ return
+
+ headers = {"Content-type": "application/json",
+ "Accept": "*/*"}
+
+ conn = httplib.HTTPConnection(compass_host, 80)
+ token = auth(conn)
+ headers["X-Auth-Token"] = token
+ logging.info("host=%s,url=%s,body=%s,headers=%s" % (compass_host,url,json.dumps(body),headers))
+ conn.request("POST", url, json.dumps(body), headers)
+ resp = conn.getresponse()
+ try:
+ raise_for_status(resp)
+ logging.info("notify host status success!!! status=%s, body=%s" % (resp.status, resp.read()))
+ except Exception as e:
+ logging.error("http request failed %s" % str(e))
+ raise
+ finally:
+ conn.close()
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+        logging.error("params: host and status are needed")
+ sys.exit(1)
+
+ host = sys.argv[1]
+ status = sys.argv[2]
+    notify_host("localhost", host, status)
diff --git a/compass/deploy/template/network/bridge.xml b/compass/deploy/template/network/bridge.xml
new file mode 100644
index 000000000..6202cf1b8
--- /dev/null
+++ b/compass/deploy/template/network/bridge.xml
@@ -0,0 +1,5 @@
+<network ipv6='no'>
+ <name>REPLACE_NAME</name>
+ <forward mode='bridge'>
+ </forward>
+</network>
diff --git a/compass/deploy/template/network/nat.xml b/compass/deploy/template/network/nat.xml
new file mode 100644
index 000000000..90ce888dd
--- /dev/null
+++ b/compass/deploy/template/network/nat.xml
@@ -0,0 +1,10 @@
+<network ipv6='yes'>
+ <name>REPLACE_NAME</name>
+ <forward mode='nat'/>
+ <bridge name='REPLACE_BRIDGE'/>
+ <ip address='REPLACE_GATEWAY' netmask='REPLACE_MASK'>
+ <!--dhcp>
+ <range start='REPLACE_START' end='REPLACE_END'/>
+ </dhcp-->
+ </ip>
+</network>
diff --git a/compass/deploy/template/vm/compass.xml b/compass/deploy/template/vm/compass.xml
new file mode 100644
index 000000000..918a9f213
--- /dev/null
+++ b/compass/deploy/template/vm/compass.xml
@@ -0,0 +1,64 @@
+<domain type='kvm'>
+ <name>compass</name>
+ <memory unit='MiB'>REPLACE_MEM</memory>
+ <currentMemory unit='MiB'>REPLACE_MEM</currentMemory>
+ <vcpu placement='static'>REPLACE_CPU</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='host-model'>
+ <model fallback='allow'/>
+ <feature policy='optional' name='vmx'/>
+ <feature policy='optional' name='svm'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='REPLACE_IMAGE'/>
+ <target dev='vda' bus='ide'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='REPLACE_ISO'/>
+ <target dev='hdc' bus='ide'/>
+ <readonly/>
+ </disk>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='REPLACE_NET_MGMT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_INSTALL'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0' keymap='en-us'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ </video>
+ </devices>
+</domain>
diff --git a/compass/deploy/template/vm/host.xml b/compass/deploy/template/vm/host.xml
new file mode 100644
index 000000000..b399e6ffc
--- /dev/null
+++ b/compass/deploy/template/vm/host.xml
@@ -0,0 +1,67 @@
+<domain type='kvm'>
+ <name>REPLACE_NAME</name>
+ <memory unit='MiB'>REPLACE_MEM</memory>
+ <currentMemory unit='MiB'>REPLACE_MEM</currentMemory>
+ <vcpu placement='static'>REPLACE_CPU</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='network'/>
+ <bios useserial='yes' rebootTimeout='0'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='REPLACE_IMAGE'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='bridge'>
+      <mac address='REPLACE_BOOT_MAC'/>
+ <source bridge='REPLACE_BRIDGE_MGMT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_TENANT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_PUBLIC'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_STORAGE'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/0'>
+ <source path='/dev/pts/0'/>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5901' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ </devices>
+</domain>
diff --git a/foreman/build/Makefile b/foreman/build/Makefile
index 8b87ce61e..2d2a2a7ad 100644
--- a/foreman/build/Makefile
+++ b/foreman/build/Makefile
@@ -26,7 +26,7 @@ export VBOXDNLD = http://download.virtualbox.org/virtualbox/rpm/el/7.1/x86_64/Vi
export VBOXRPM = $(shell pwd)/VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
export VAGRANTDNLD = https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm
export VAGRANTRPM = $(shell pwd)/vagrant_1.7.2_x86_64.rpm
-export GENESISRPM = $(shell pwd)/x86_64/opnfv-genesis-0.1-1.x86_64.rpm
+export GENESISRPM = $(shell pwd)/x86_64/opnfv-genesis-0.2-1.x86_64.rpm
# Note! Invoke with "make REVSTATE=RXXXX all" to make release build!
# Invoke with ICOCACHE=/full/path/to/iso if cached ISO is in non-standard location.
@@ -106,8 +106,8 @@ rpm-clean:
.PHONY: rpm
rpm:
- pushd ../../ && git archive --format=tar --prefix=opnfv-genesis-0.1/ HEAD | gzip > foreman/build/opnfv-genesis.tar.gz
- rpmbuild -ba opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+ pushd ../../ && git archive --format=tar --prefix=opnfv-genesis-0.2/ HEAD | gzip > foreman/build/opnfv-genesis.tar.gz
+ rpmbuild -ba opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
@make rpm-clean
diff --git a/foreman/build/cache.mk b/foreman/build/cache.mk
index fdfd0034a..56b72731b 100644
--- a/foreman/build/cache.mk
+++ b/foreman/build/cache.mk
@@ -16,6 +16,8 @@ CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
# BEGIN of variables to customize
#
CACHEFILES += .versions
+CACHEFILES += $(shell basename $(VAGRANTRPM))
+CACHEFILES += $(shell basename $(VBOXRPM))
CACHEFILES += $(shell basename $(ISOSRC))
#
# END of variables to customize
diff --git a/foreman/build/opnfv-genesis.spec b/foreman/build/opnfv-genesis.spec
index 674760fea..30692b4e1 100644
--- a/foreman/build/opnfv-genesis.spec
+++ b/foreman/build/opnfv-genesis.spec
@@ -1,5 +1,5 @@
Name: opnfv-genesis
-Version: 0.1
+Version: 0.2
Release: 1
Summary: The files from the OPNFV genesis repo
@@ -8,8 +8,8 @@ License: Apache 2.0
URL: https://gerrit.opnfv.org/gerrit/genesis.git
Source0: opnfv-genesis.tar.gz
-#BuildRequires:
-Requires: vagrant, VirtualBox-4.3
+#BuildRequires:
+Requires: vagrant, VirtualBox-4.3, net-tools
%description
The files from the OPNFV genesis repo
@@ -21,13 +21,16 @@ The files from the OPNFV genesis repo
%build
%install
-mkdir -p %{buildroot}/usr/bin/
-cp foreman/ci/deploy.sh %{buildroot}/usr/bin/
+mkdir -p %{buildroot}/root/genesis
+cp -r foreman/ %{buildroot}/root/genesis
+cp -r common/ %{buildroot}/root/genesis
%files
-/usr/bin/deploy.sh
+/root/genesis
%changelog
-* Fri Apr 24 2015 Dan Radez <dradez@redhatcom> - 0.1-1
+* Tue Sep 15 2015 Dan Radez <dradez@redhat.com> - 0.2-1
+- Updating the install files and cleaning up white space
+* Fri Apr 24 2015 Dan Radez <dradez@redhat.com> - 0.1-1
- Initial Packaging
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
index 100e12db0..c7dfc0335 100644
--- a/foreman/ci/Vagrantfile
+++ b/foreman/ci/Vagrantfile
@@ -12,7 +12,7 @@ Vagrant.configure(2) do |config|
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
- config.vm.box = "chef/centos-7.0"
+ config.vm.box = "opnfv/centos-7.0"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
@@ -41,6 +41,9 @@ Vagrant.configure(2) do |config|
default_gw = ""
nat_flag = false
+ # Disable dhcp flag
+ disable_dhcp_flag = false
+
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
@@ -77,10 +80,11 @@ Vagrant.configure(2) do |config|
# sudo apt-get update
# sudo apt-get install -y apache2
# SHELL
-
+
config.ssh.username = 'root'
config.ssh.password = 'vagrant'
config.ssh.insert_key = 'true'
+ config.vm.provision :shell, path: "resize_partition.sh"
config.vm.provision "ansible" do |ansible|
ansible.playbook = "reload_playbook.yml"
end
@@ -90,4 +94,9 @@ Vagrant.configure(2) do |config|
config.vm.provision :shell, path: "nat_setup.sh"
end
config.vm.provision :shell, path: "bootstrap.sh"
+ if disable_dhcp_flag
+ config.vm.provision :shell, :inline => "systemctl stop dhcpd"
+ config.vm.provision :shell, :inline => "systemctl disable dhcpd"
+ end
+ config.vm.provision :shell, path: "resize_lvm.sh"
end
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
index 4bc22ed26..c98f00e6c 100755
--- a/foreman/ci/bootstrap.sh
+++ b/foreman/ci/bootstrap.sh
@@ -25,8 +25,7 @@ green=`tput setaf 2`
yum install -y epel-release-7*
# Install other required packages
-# Major version is pinned to force some consistency for Arno
-if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then
printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
exit 1
fi
@@ -36,7 +35,7 @@ cd /opt
echo "Cloning khaleesi to /opt"
if [ ! -d khaleesi ]; then
- if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+ if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
exit 1
fi
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
index f61ac9372..345864b27 100755
--- a/foreman/ci/clean.sh
+++ b/foreman/ci/clean.sh
@@ -3,22 +3,23 @@
#Clean script to uninstall provisioning server for Foreman/QuickStack
#author: Tim Rozet (trozet@redhat.com)
#
-#Uses Vagrant and VirtualBox
+#Removes Libvirt, KVM, Vagrant, VirtualBox
#
-#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Destroys Vagrant VMs running in $vm_dir/
#Shuts down all nodes found in Khaleesi settings
-#Removes hypervisor kernel modules (VirtualBox)
+#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt)
##VARS
reset=`tput sgr0`
blue=`tput setaf 4`
red=`tput setaf 1`
green=`tput setaf 2`
+vm_dir=/var/opt/opnfv
##END VARS
##FUNCTIONS
display_usage() {
- echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+ echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo -e "\nUsage:\n$0 [arguments] \n"
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. Example: -base_config /opt/myinventory.yml \n"
@@ -31,7 +32,7 @@ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
exit 0
fi
-echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo "Use -h to display help"
sleep 2
@@ -50,54 +51,55 @@ do
esac
done
-
-# Install ipmitool
-# Major version is pinned to force some consistency for Arno
-if ! yum list installed | grep -i ipmitool; then
- if ! yum -y install ipmitool-1*; then
- echo "${red}Unable to install ipmitool!${reset}"
- exit 1
- fi
-else
- echo "${blue}Skipping ipmitool as it is already installed!${reset}"
-fi
-
-###find all the bmc IPs and number of nodes
-node_counter=0
-output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'`
-for line in ${output} ; do
- bmc_ip[$node_counter]=$line
- ((node_counter++))
-done
-
-max_nodes=$((node_counter-1))
-
-###find bmc_users per node
-node_counter=0
-output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
-for line in ${output} ; do
- bmc_user[$node_counter]=$line
- ((node_counter++))
-done
-
-###find bmc_pass per node
-node_counter=0
-output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
-for line in ${output} ; do
- bmc_pass[$node_counter]=$line
- ((node_counter++))
-done
-
-for mynode in `seq 0 $max_nodes`; do
- echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
- if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
- echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Unable to install ipmitool!${reset}"
+ exit 1
+ fi
else
- echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
- exit 1
+ echo "${blue}Skipping ipmitool as it is already installed!${reset}"
fi
-done
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+ echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+ else
+ echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+ exit 1
+ fi
+ done
+else
+ echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}"
+fi
###check to see if vbox is installed
vboxpkg=`rpm -qa | grep VirtualBox`
if [ $? -eq 0 ]; then
@@ -106,39 +108,120 @@ else
skip_vagrant=1
fi
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+ cd /tmp/bgs_vagrant
+ vagrant destroy -f
+ rm -rf /tmp/bgs_vagrant
+fi
+
###destroy vagrant
if [ $skip_vagrant -eq 0 ]; then
- cd /tmp/bgs_vagrant
- if vagrant destroy -f; then
- echo "${blue}Successfully destroyed Foreman VM ${reset}"
+ if [ -d $vm_dir ]; then
+ ##all vm directories
+ for vm in $( ls $vm_dir ); do
+ cd $vm_dir/$vm
+ if vagrant destroy -f; then
+ echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+ else
+ echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+ killall vagrant
+ echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+ if ps axf | grep vagrant | grep -v 'grep'; then
+ echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
+ fi
+ fi
+
+ ##Vagrant boxes appear as VboxHeadless processes
+ ##try to gracefully destroy the VBox VM if it still exists
+ if vboxmanage list runningvms | grep $vm; then
+ echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+ vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+ vboxmanage controlvm $vbox_id poweroff
+ if vboxmanage unregistervm --delete $vbox_id; then
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ else
+ echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ fi
+ done
else
- echo "${red}Unable to destroy Foreman VM ${reset}"
- echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
- if ps axf | grep vagrant; then
- echo "${red}Vagrant VM still exists...exiting ${reset}"
- exit 1
- else
- echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
- fi
+ echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
fi
+ echo "${blue}Checking for any remaining virtual box processes...${reset}"
###kill virtualbox
- echo "${blue}Killing VirtualBox ${reset}"
- killall virtualbox
- killall VBoxHeadless
+ if ps axf | grep virtualbox | grep -v 'grep'; then
+ echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+ killall virtualbox
+ fi
+
+ ###kill any leftover VMs (brute force)
+ if ps axf | grep VBoxHeadless | grep -v 'grep'; then
+ echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+ killall VBoxHeadless
+ fi
###remove virtualbox
- echo "${blue}Removing VirtualBox ${reset}"
+ echo "${blue}Removing VirtualBox... ${reset}"
yum -y remove $vboxpkg
else
- echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+ echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
fi
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
+
+###check to see if libvirt is installed
+echo "${blue}Checking if libvirt/KVM is installed${reset}"
+if rpm -qa | grep -iE 'libvirt|kvm'; then
+ echo "${blue}Libvirt/KVM is installed${reset}"
+ echo "${blue}Checking for any QEMU/KVM VMs...${reset}"
+ vm_count=0
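+ ##count VMs: 'virsh list --all' prints a two-line header plus a trailing blank line, which sed/head strip before counting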
+ while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1)
+ if [ $vm_count -gt 0 ]; then
+ echo "${blue}VMs Found: $vm_count${reset}"
+ vm_running=0
+ while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running)
+ echo "${blue}Powering off $vm_running VM(s)${reset}"
+ while read -r vm; do
+ if ! virsh destroy $vm; then
+ echo "${red}WARNING: Unable to power off VM ${vm}${reset}"
+ else
+ echo "${blue}VM $vm powered off!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}')
+ echo "${blue}Destroying libvirt VMs...${reset}"
+ while read -r vm; do
+ if ! virsh undefine --remove-all-storage $vm; then
+ echo "${red}ERROR: Unable to remove the VM ${vm}${reset}"
+ exit 1
+ else
+ echo "${blue}VM $vm removed!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}')
+ else
+ echo "${blue}No VMs found for removal${reset}"
+ fi
+ echo "${blue}Removing libvirt and kvm packages${reset}"
+ yum -y remove libvirt-*
+ yum -y remove *qemu*
+else
+ echo "${blue}libvirt/KVM is not installed${reset}"
+fi
###remove kernel modules
echo "${blue}Removing kernel modules ${reset}"
-for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do
if ! rmmod $kernel_mod; then
if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
echo "${blue} $kernel_mod is not currently loaded! ${reset}"
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 86f03a743..6bf8f12aa 100755
--- a/foreman/ci/deploy.sh
+++ b/foreman/ci/deploy.sh
@@ -25,6 +25,14 @@ red=`tput setaf 1`
green=`tput setaf 2`
declare -A interface_arr
+declare -A controllers_ip_arr
+declare -A admin_ip_arr
+declare -A public_ip_arr
+
+vagrant_box_dir=~/.vagrant.d/boxes/opnfv-VAGRANTSLASH-centos-7.0/1.0.0/virtualbox/
+vagrant_box_vmdk=box-disk1.vmdk
+vm_dir=/var/opt/opnfv
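+##absolute path of this script; clone_bgs uses it to locate the genesis checkout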
+script=`realpath $0`
##END VARS
##FUNCTIONS
@@ -35,6 +43,38 @@ display_usage() {
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n"
echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n"
+ echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n"
+ echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must be at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n"
+ echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n"
+ echo -e "\n -floating_ip_count : number of IP addresses from the public range to be used for floating IPs. Default is 20.\n"
+ echo -e "\n -admin_nic : Baremetal NIC for the admin network. Required if other \"nic\" arguments are used. \
+Not applicable with -virtual. Example: -admin_nic em1"
+ echo -e "\n -private_nic : Baremetal NIC for the private network. Required if other \"nic\" arguments are used. \
+Not applicable with -virtual. Example: -private_nic em2"
+ echo -e "\n -public_nic : Baremetal NIC for the public network. Required if other \"nic\" arguments are used. \
+Can also be used with -virtual. Example: -public_nic em3"
+ echo -e "\n -storage_nic : Baremetal NIC for the storage network. Optional. Not applicable with -virtual. \
+Private NIC will be used for storage if not specified. Example: -storage_nic em4"
+ echo -e "\n -single_baremetal_nic : Baremetal NIC for the all in one network. Optional. Not applicable with -virtual. \
+Example: -single_baremetal_nic em1"
+}
+
+##verify vm dir exists
+##params: none
+function verify_vm_dir {
+ if [ -d "$vm_dir" ]; then
+ echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n"
+ exit 1
+ else
+ mkdir -p $vm_dir
+ fi
+
+ chmod 700 $vm_dir
+
+ if [ ! -d $vm_dir ]; then
+ echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n"
+ exit 1
+ fi
}
##find ip of interface
@@ -51,6 +91,41 @@ function find_subnet {
printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
}
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ num_ips_required=$2
+
+ ##this function assumes you would never need more than 254
+ ##we check here to make sure
+ if [ "$num_ips_required" -ge 254 ]; then
+ echo -e "\n\n${red}ERROR: allocating 254 or more IPs is unsupported...Exiting${reset}\n\n"
+ return 1
+ fi
+
+ ##we just return if 3rd octet is not 255
+ ##because we know the subnet is big enough
+ if [ "$i3" -ne 255 ]; then
+ return 0
+ elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+ return 0
+ else
+ echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+ return 1
+ fi
+}
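+##e.g. verify_subnet_size 255.255.0.0 25 returns 0 (third mask octet is not 255, so the subnet is big enough)
+##e.g. verify_subnet_size 255.255.255.240 25 returns 1 (only 254-240=14 IPs above the base address)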
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ IFS=. read -r m1 m2 m3 m4 <<< "$2"
+ IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+ printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
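+##e.g. find_last_ip_subnet 192.168.1.50 255.255.255.0 prints 192.168.1.254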
+
##increments subnet by a value
##params: ip, value
##assumes low value
@@ -87,6 +162,19 @@ function next_ip {
echo $baseaddr.$lsv
}
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octet of the address
+subtract_ip() {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ ip_count=$2
+ if [ $i4 -lt $ip_count ]; then
+ echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n"
+ exit 1
+ fi
+ printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
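+##e.g. subtract_ip 192.168.1.254 20 prints 192.168.1.234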
+
##removes the network interface config from Vagrantfile
##params: interface
##assumes you are in the directory of Vagrantfile
@@ -149,19 +237,21 @@ parse_yaml() {
}'
}
-##END FUNCTIONS
-
-if [[ ( $1 == "--help") || $1 == "-h" ]]; then
+##translates the command line parameters into variables
+##params: $@ the entire command line is passed
+##usage: parse_cmdline "$@"
+parse_cmdline() {
+ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
display_usage
exit 0
-fi
+ fi
-echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
-echo "Use -h to display help"
-sleep 2
+ echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+ echo "Use -h to display help"
+ sleep 2
-while [ "`echo $1 | cut -c1`" = "-" ]
-do
+ while [ "`echo $1 | cut -c1`" = "-" ]
+ do
echo $1
case "$1" in
-base_config)
@@ -176,35 +266,152 @@ do
virtual="TRUE"
shift 1
;;
+ -enable_virtual_dhcp)
+ enable_virtual_dhcp="TRUE"
+ shift 1
+ ;;
+ -static_ip_range)
+ static_ip_range=$2
+ shift 2
+ ;;
+ -ping_site)
+ ping_site=$2
+ shift 2
+ ;;
+ -floating_ip_count)
+ floating_ip_count=$2
+ shift 2
+ ;;
+ -admin_nic)
+ admin_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -private_nic)
+ private_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -public_nic)
+ public_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -storage_nic)
+ storage_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -single_baremetal_nic)
+ single_baremetal_nic=$2
+ shift 2
+ ;;
*)
display_usage
exit 1
;;
-esac
-done
+ esac
+ done
+
+ if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP! Exiting${reset}\n\n"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ if [ ! -z "$enable_virtual_dhcp" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual! Exiting${reset}\n\n"
+ exit 1
+ elif [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. static_ip_range can only be set when using -virtual! Exiting${reset}\n\n"
+ exit 1
+ fi
+ fi
+
+ if [ -z "$floating_ip_count" ]; then
+ floating_ip_count=20
+ fi
+
+ ##Validate nic args
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ if [ ! -z "$single_baremetal_nic" ]; then
+ echo "${red}Please do not specify other nic types along with single_baremetal_nic!${reset}"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ for nic_type in admin_nic private_nic public_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ -z "$nic_value" ]; then
+ echo "${red}$nic_type is empty or not defined. Required when other nic args are given!${reset}"
+ exit 1
+ fi
+ interface_ip=$(find_ip $nic_value)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}$nic_value does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ done
+ else
+ ##if virtual only public_nic should be specified
+ for nic_type in admin_nic private_nic storage_nic single_baremetal_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ ! -z "$nic_value" ]; then
+ echo "${red}$nic_type is not a valid argument using -virtual. Please only specify public_nic!${reset}"
+ exit 1
+ fi
+ done
+
+ interface_ip=$(find_ip $public_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Public NIC: $public_nic does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ fi
+ elif [ ! -z "$single_baremetal_nic" ]; then
+ interface_ip=$(find_ip $single_baremetal_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Single Baremetal NIC: $single_baremetal_nic does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ fi
+}
##disable selinux
-/sbin/setenforce 0
-
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
-
-# Install other required packages
-# Major versions are pinned to force some consistency for Arno
-if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
- printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
- exit 1
-fi
-
-##install VirtualBox repo
-if cat /etc/*release | grep -i "Fedora release"; then
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
-else
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
-fi
-
-cat > /etc/yum.repos.d/virtualbox.repo << EOM
+##params: none
+##usage: disable_selinux()
+disable_selinux() {
+ /sbin/setenforce 0
+}
+
+##Install the EPEL repository and additional packages
+##params: none
+##usage: install_EPEL()
+install_EPEL() {
+ # Install EPEL repo for access to many other yum repos
+ # Major version is pinned to force some consistency for Arno
+ yum install -y epel-release-7*
+
+ # Install other required packages
+ # Major versions are pinned to force some consistency for Arno
+ if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
+ printf '%s\n' 'deploy.sh: Unable to install dependency packages' >&2
+ exit 1
+ fi
+}
+
+##Download and install virtual box
+##params: none
+##usage: install_vbox()
+install_vbox() {
+ ##install VirtualBox repo
+ if cat /etc/*release | grep -i "Fedora release"; then
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
+ else
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
+ fi
+
+ cat > /etc/yum.repos.d/virtualbox.repo << EOM
[virtualbox]
name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox
baseurl=$vboxurl
@@ -215,364 +422,374 @@ skip_if_unavailable = 1
keepcache = 0
EOM
-##install VirtualBox
-if ! yum list installed | grep -i virtualbox; then
- if ! yum -y install VirtualBox-4.3; then
- printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
- exit 1
+ ##install VirtualBox
+ if ! yum list installed | grep -i virtualbox; then
+ if ! yum -y install VirtualBox-4.3; then
+ printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
+ exit 1
+ fi
fi
-fi
-##install kmod-VirtualBox
-if ! lsmod | grep vboxdrv; then
- if ! sudo /etc/init.d/vboxdrv setup; then
- printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
- exit 1
+ ##install kmod-VirtualBox
+ if ! lsmod | grep vboxdrv; then
+ sudo /etc/init.d/vboxdrv setup
+ if ! lsmod | grep vboxdrv; then
+ printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
-fi
+}
-##install Ansible
-if ! yum list installed | grep -i ansible; then
- if ! yum -y install ansible-1*; then
- printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
- exit 1
+##install Ansible using yum
+##params: none
+##usage: install_ansible()
+install_ansible() {
+ if ! yum list installed | grep -i ansible; then
+ if ! yum -y install ansible-1*; then
+ printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+ exit 1
+ fi
fi
-fi
+}
-##install Vagrant
-if ! rpm -qa | grep vagrant; then
- if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
- printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
- exit 1
+##install Vagrant RPM directly with the bintray.com site
+##params: none
+##usage: install_vagrant()
+install_vagrant() {
+ if ! rpm -qa | grep vagrant; then
+ if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
+ printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
-fi
-##add centos 7 box to vagrant
-if ! vagrant box list | grep chef/centos-7.0; then
- if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
- printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
- exit 1
+ ##add centos 7 box to vagrant
+ if ! vagrant box list | grep opnfv/centos-7.0; then
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
-fi
-##install workaround for centos7
-if ! vagrant plugin list | grep vagrant-centos7_fix; then
- if ! vagrant plugin install vagrant-centos7_fix; then
- printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ ##install workaround for centos7
+ if ! vagrant plugin list | grep vagrant-centos7_fix; then
+ if ! vagrant plugin install vagrant-centos7_fix; then
+ printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
-fi
+}
-cd /tmp/
##remove bgs vagrant in case it wasn't cleaned up
-rm -rf /tmp/bgs_vagrant
-
-##clone bgs vagrant
-##will change this to be opnfv repo when commit is done
-if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
-fi
-
-cd bgs_vagrant
-
-echo "${blue}Detecting network configuration...${reset}"
-##detect host 1 or 3 interface configuration
-#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
-output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
-if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
-fi
-
-##find number of interfaces with ip and substitute in VagrantFile
-if_counter=0
-for interface in ${output}; do
-
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- new_ip=$(next_usable_ip $interface_ip)
- if [ ! "$new_ip" ]; then
- continue
- fi
- interface_arr[$interface]=$if_counter
- interface_ip_arr[$if_counter]=$new_ip
- subnet_mask=$(find_netmask $interface)
- if [ "$if_counter" -eq 1 ]; then
- private_subnet_mask=$subnet_mask
- private_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 2 ]; then
- public_subnet_mask=$subnet_mask
- public_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 3 ]; then
- storage_subnet_mask=$subnet_mask
- fi
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
- ((if_counter++))
-done
-
-##now remove interface config in Vagrantfile for 1 node
-##if 1, 3, or 4 interfaces set deployment type
-##if 2 interfaces remove 2nd interface and set deployment type
-if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
-elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
-elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
-else
- deployment_type="multi_network"
-fi
-
-echo "${blue}Network detected: ${deployment_type}! ${reset}"
-
-if route | grep default; then
- echo "${blue}Default Gateway Detected ${reset}"
- host_default_gw=$(ip route | grep default | awk '{print $3}')
- echo "${blue}Default Gateway: $host_default_gw ${reset}"
- default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
- case "${interface_arr[$default_gw_interface]}" in
- 0)
- echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- node_default_gw=$host_default_gw
- ;;
- 1)
- echo "${red}Default Gateway Detected on Private Interface!${reset}"
- echo "${red}Private subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- 2)
- echo "${blue}Default Gateway Detected on Public Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
- sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
- echo "${blue}Setting node gateway to be VM Admin IP${reset}"
- node_default_gw=${interface_ip_arr[0]}
- public_gateway=$default_gw
- ;;
- 3)
- echo "${red}Default Gateway Detected on Storage Interface!${reset}"
- echo "${red}Storage subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- *)
- echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
- exit 1
- ;;
- esac
-else
- #assumes 24 bit mask
- defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
- firstip=.1
- defaultgw=$defaultgw$firstip
- echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
- node_default_gw=$defaultgw
-fi
-
-if [ $base_config ]; then
- if ! cp -f $base_config opnfv_ksgen_settings.yml; then
- echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
- exit 1
- fi
-fi
-
-if [ $no_parse ]; then
-echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
-
-else
-
-echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
-##Edit the ksgen settings appropriately
-##ksgen settings will be stored in /vagrant on the vagrant machine
-##if single node deployment all the variables will have the same ip
-##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
-
-sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+##params: none
+##usage: clean_tmp()
+clean_tmp() {
+ rm -rf $vm_dir/foreman_vm
+}
-##replace private interface parameter
-##private interface will be of hosts, so we need to know the provisioned host interface name
-##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
-##replace IP for parameters with next IP that will be given to controller
-if [ "$deployment_type" == "single_network" ]; then
- ##we also need to assign IP addresses to nodes
- ##for single node, foreman is managing the single network, so we can't reserve them
- ##not supporting single network anymore for now
- echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}"
- exit 0
+##clone genesis and move to node vm dir
+##params: destination directory
+##usage: clone_bgs /tmp/myvm/
+clone_bgs() {
+ script_dir="`dirname "$script"`"
+ cp -fr $script_dir/ $1
+ cp -fr $script_dir/../../common/puppet-opnfv $1
+}
-elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+##validates the network settings and update VagrantFile with network settings
+##params: none
+##usage: configure_network()
+configure_network() {
+ cd $vm_dir/foreman_vm
+
+ ##if nic_arg_flag is set, then we don't figure out
+ ##NICs dynamically
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}"
+ if [ $virtual ]; then
+ nic_list="$public_nic"
+ elif [ -z "$storage_nic" ]; then
+ echo "${blue}storage_nic not defined, will combine storage into private VLAN ${reset}"
+ nic_list="$admin_nic $private_nic $public_nic"
+ else
+ nic_list="$admin_nic $private_nic $public_nic $storage_nic"
+ fi
+ nic_array=( $nic_list )
+ output=$nic_list
+ elif [ ! -z "$single_baremetal_nic" ]; then
+ output=$single_baremetal_nic
+ else
+ echo "${blue}Detecting network configuration...${reset}"
+ ##detect host 1 or 3 interface configuration
+ #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
+ #output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f10`
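+ ##symlinks for virtual devices live under /sys/devices/virtual, so 'grep -v virtual' keeps only physical NICs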
+ output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | awk '{print $9}'`
+ fi
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ if [ ! "$output" ]; then
+ printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+ exit 1
fi
- sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+ ##virtual we only find 1 interface
+ if [ $virtual ]; then
+ if [ ! -z "${nic_array[0]}" ]; then
+ echo "${blue}Public Interface specified: ${nic_array[0]}${reset}"
+ this_default_gw_interface=${nic_array[0]}
+ else
+ ##find interface with default gateway
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
+ fi
- ##get ip addresses for private network on controllers to make dhcp entries
- ##required for controllers_ip_array global param
- next_private_ip=${interface_ip_arr[1]}
- type=_private
- for node in controller1 controller2 controller3; do
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
- exit 1
+ ##find interface IP, make sure its valid
+ interface_ip=$(find_ip $this_default_gw_interface)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}"
+ exit 1
fi
- sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
- controller_ip_array=$controller_ip_array$next_private_ip,
- done
- ##replace global param for contollers_ip_array
- controller_ip_array=${controller_ip_array%?}
- sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+ ##set variable info
+ if [ ! -z "$static_ip_range" ]; then
+ new_ip=$(echo $static_ip_range | cut -d , -f1)
+ subnet_mask=$(find_netmask $this_default_gw_interface)
+ host_subnet=$(find_subnet $interface_ip $subnet_mask)
+ ip_range_subnet=$(find_subnet $new_ip $subnet_mask)
+ if [ "$ip_range_subnet" != "$host_subnet" ]; then
+ echo "${red}static_ip_range: ${static_ip_range} is not in the same subnet as your default gateway interface: ${host_subnet}. Please use a correct range!${reset}"
+ exit 1
+ fi
+ else
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}"
+ exit 1
+ fi
+ fi
+ interface=$this_default_gw_interface
+ public_interface=$interface
+ interface_arr[$interface]=2
+ interface_ip_arr[2]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
- ##now replace all the VIP variables. admin//private can be the same IP
- ##we have to use IP's here that won't be allocated to hosts at provisioning time
- ##therefore we increment the ip by 10 to make sure we have a safe buffer
- next_private_ip=$(increment_ip $next_private_ip 10)
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[2]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
- grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
- exit 1
+ ##set that interface to be public
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ if_counter=1
+ else
+ if [ ! -z $single_baremetal_nic ]; then
+ interface_ip=$(find_ip $single_baremetal_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Unable to determine IP address of $single_baremetal_nic. Exiting...${reset}"
+ exit 1
+ fi
+ subnet_mask=$(find_netmask $single_baremetal_nic)
+ public_subnet_mask=$subnet_mask
+ if ! verify_subnet_size $public_subnet_mask 50; then
+ echo "${red} Not enough IPs in subnet: $interface_ip $subnet_mask. Need at least 50 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red}Unable to allocate new IP address: $interface_ip $subnet_mask Exiting...${reset}"
+ exit 1
+ fi
+
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
+ if [ "$this_default_gw_interface" != "$single_baremetal_nic" ]; then
+ echo "${red}Error: Your default gateway interface: $this_default_gw_interface does not \
+match the baremetal nic you provided: ${single_baremetal_nic}. Exiting...${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$single_baremetal_nic"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ interface_ip_arr[0]=$new_ip
+ interface_arr[$single_baremetal_nic]=0
+ admin_ip=$new_ip
+ admin_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $single_baremetal_nic)
+ if_counter=1
+ else
+ ##find number of interfaces with ip and substitute in VagrantFile
+ if_counter=0
+ for interface in ${output}; do
+
+ if [ "$if_counter" -ge 4 ]; then
+ break
+ fi
+ interface_ip=$(find_ip $interface)
+ if [ ! "$interface_ip" ]; then
+ continue
+ fi
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ continue
+ fi
+ interface_arr[$interface]=$if_counter
+ interface_ip_arr[$if_counter]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ if [ "$if_counter" -eq 0 ]; then
+ admin_subnet_mask=$subnet_mask
+ admin_ip=$new_ip
+ if ! verify_subnet_size $admin_subnet_mask 5; then
+ echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ elif [ "$if_counter" -eq 1 ]; then
+ private_subnet_mask=$subnet_mask
+ private_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $private_subnet_mask 15; then
+ echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 2 ]; then
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 3 ]; then
+ storage_subnet_mask=$subnet_mask
+
+ if ! verify_subnet_size $storage_subnet_mask 10; then
+ echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ ((if_counter++))
+ done
fi
- done
+ fi
- ##replace foreman site
- next_public_ip=${interface_ip_arr[2]}
- sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
- ##replace public vips
- next_public_ip=$(increment_ip $next_public_ip 10)
- grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
- next_public_ip=$(next_usable_ip $next_public_ip)
- if [ ! "$next_public_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
+ ##now remove interface config in Vagrantfile for 1 node
+ ##if 1, 3, or 4 interfaces set deployment type
+ ##if 2 interfaces remove 2nd interface and set deployment type
+ if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+ if [ $virtual ]; then
+ deployment_type="single_network"
+ echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}"
+ private_internal_ip=155.1.2.2
+ admin_internal_ip=156.1.2.2
+ private_subnet_mask=255.255.255.0
+ private_short_subnet_mask=/24
+ interface_ip_arr[1]=$private_internal_ip
+ interface_ip_arr[0]=$admin_internal_ip
+ admin_subnet_mask=255.255.255.0
+ admin_short_subnet_mask=/24
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ remove_vagrant_network eth_replace3
+ deployment_type=three_network
+ elif [[ "$if_counter" == 1 ]]; then
+ echo "${blue}Single network detected for Baremetal deployment! ${reset}"
+ remove_vagrant_network eth_replace1
+ remove_vagrant_network eth_replace2
+ remove_vagrant_network eth_replace3
+ deployment_type="single_network"
+ else
+ echo "${blue}Two network interfaces detected for baremetal deployment. This is unsupported! Exiting. ${reset}"
exit 1
fi
- done
-
- ##replace public_network param
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_network param
- private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
- sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
- ##replace storage_network
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ elif [ "$if_counter" == 3 ]; then
+ deployment_type="three_network"
+ remove_vagrant_network eth_replace3
else
- next_storage_ip=${interface_ip_arr[3]}
- storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
- fi
-
- ##replace public_subnet param
- public_subnet=$public_subnet'\'$public_short_subnet_mask
- sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_subnet param
- private_subnet=$private_subnet'\'$private_short_subnet_mask
- sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
-
- ##replace public_dns param to be foreman server
- sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
-
- ##replace public_gateway
- if [ -z "$public_gateway" ]; then
- ##if unset then we assume its the first IP in the public subnet
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_gateway=$(increment_subnet $public_subnet 1)
- fi
- sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
-
- ##we have to define an allocation range of the public subnet to give
- ##to neutron to use as floating IPs
- ##we should control this subnet, so this range should work .150-200
- ##but generally this is a bad idea and we are assuming at least a /24 subnet here
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_allocation_start=$(increment_subnet $public_subnet 150)
- public_allocation_end=$(increment_subnet $public_subnet 200)
-
- sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
- sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
-
-else
- printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2
- exit 1
-fi
-
-echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
-
-fi
-
-if [ $virtual ]; then
- echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
- sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
-fi
-
-echo "${blue}Starting Vagrant! ${reset}"
-
-##stand up vagrant
-if ! vagrant up; then
- printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
- exit 1
-else
- echo "${blue}Foreman VM is up! ${reset}"
-fi
-
-if [ $virtual ]; then
-
-##Bring up VM nodes
-echo "${blue}Setting VMs up... ${reset}"
-nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
-##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
-##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
-##3 static controllers
-compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
-controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
-nodes=${controller_nodes}${compute_nodes}
-
-for node in ${nodes}; do
- cd /tmp
-
- ##remove VM nodes incase it wasn't cleaned up
- rm -rf /tmp/$node
-
- ##clone bgs vagrant
- ##will change this to be opnfv repo when commit is done
- if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
+ deployment_type="multi_network"
+ fi
+
+ echo "${blue}Network detected: ${deployment_type}! ${reset}"
+
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile
+ if [ $static_ip_range ]; then
+ ##verify static range is at least 20 IPs
+ static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1)
+ static_ip_range_end=$(echo $static_ip_range | cut -d , -f2)
+ ##verify range is at least 20 ips
+ ##assumes less than 255 range pool
+ begin_octet=$(echo $static_ip_range_begin | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_count=$((end_octet-begin_octet+1))
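+ ##e.g. '192.168.1.1,192.168.1.20' gives ip_count=(20-1+1)=20, which passes the 20-IP minimum below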
+ if [ "$ip_count" -lt 20 ]; then
+ echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Static IP range is size $ip_count ${reset}"
+ fi
+ fi
+ fi
fi
- cd $node
+ if route | grep default; then
+ echo "${blue}Default Gateway Detected ${reset}"
+ host_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $host_default_gw ${reset}"
+ default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
+ case "${interface_arr[$default_gw_interface]}" in
+ 0)
+ echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ node_default_gw=$host_default_gw
+ ;;
+ 1)
+ echo "${red}Default Gateway Detected on Private Interface!${reset}"
+ echo "${red}Private subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ 2)
+ echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+ sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
+ echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+ node_default_gw=${interface_ip_arr[0]}
+ public_gateway=$host_default_gw
+ ;;
+ 3)
+ echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+ echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ *)
+ echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
+ exit 1
+ ;;
+ esac
+ else
+ #assumes 24 bit mask
+ defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
+ firstip=.1
+ defaultgw=$defaultgw$firstip
+ echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
+ node_default_gw=$defaultgw
+ fi
if [ $base_config ]; then
if ! cp -f $base_config opnfv_ksgen_settings.yml; then
@@ -581,114 +798,671 @@ for node in ${nodes}; do
fi
fi
- ##parse yaml into variables
- eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
- ##find node type
- node_type=config_nodes_${node}_type
- node_type=$(eval echo \$$node_type)
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ echo "${blue}Controller nodes found in settings: ${controller_nodes}${reset}"
+ my_controller_array=( $controller_nodes )
+ num_control_nodes=${#my_controller_array[@]}
+ if [ "$num_control_nodes" -ne 3 ]; then
+ if cat opnfv_ksgen_settings.yml | grep ha_flag | grep true; then
+ echo "${red}Error: You must define exactly 3 control nodes when HA flag is true!${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}Number of Controller nodes detected: ${num_control_nodes}${reset}"
+ fi
- ##find number of interfaces with ip and substitute in VagrantFile
- output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
+ if [ $no_parse ]; then
+ echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
+
+ else
+
+ echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
+ ##Edit the ksgen settings appropriately
+ ##ksgen settings will be stored in /vagrant on the vagrant machine
+ ##if single node deployment all the variables will have the same ip
+ ##interface names will be enp0s3, enp0s8, enp0s9 in the centos-7.0 box
+
+ sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+
+ ##replace private interface parameter
+ ##private interface will be of hosts, so we need to know the provisioned host interface name
+ ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
+ ##replace IP for parameters with next IP that will be given to controller
+
+ if [[ "$deployment_type" == "single_network" || "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ elif [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: single_network/' opnfv_ksgen_settings.yml
+ next_single_ip=${interface_ip_arr[0]}
+ foreman_ip=$next_single_ip
+ next_single_ip=$(next_usable_ip $next_single_ip)
+ fi
+
+ sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+
+ ##get ip addresses for private network on controllers to make dhcp entries
+ ##required for controllers_ip_array global param
+ if [ "$deployment_type" == "single_network" ]; then
+ next_private_ip=$next_single_ip
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type1=_admin
+ type2=_private
+ control_count=0
+ for node in ${controller_nodes}; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "${red} Unable to find an unused IP for $node ! ${reset}"
+ exit 1
+ else
+ sed -i 's/'"$node$type1"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ sed -i 's/'"$node$type2"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ fi
+ done
+
+ for node in ${compute_nodes}; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "${red} Unable to find an unused IP for $node ! ${reset}"
+ exit 1
+ else
+ sed -i 's/'"$node$type1"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ else
+ next_private_ip=${interface_ip_arr[1]}
+
+ type=_private
+ control_count=0
+ for node in controller1 controller2 controller3; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+ exit 1
+ fi
+ sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ done
+ fi
+
+ if [[ "$deployment_type" != "single_network" ]]; then
+ next_public_ip=${interface_ip_arr[2]}
+ foreman_ip=$next_public_ip
+ fi
+
+ ##if no dhcp, find all the Admin IPs for nodes in advance
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type=_admin
+ for node in ${nodes}; do
+ next_admin_ip=$(next_ip $next_admin_ip)
+ if [ ! "$next_admin_ip" ]; then
+ echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+ exit 1
+ else
+ admin_ip_arr[$node]=$next_admin_ip
+ sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ ##allocate node public IPs
+ for node in ${nodes}; do
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ echo "${red} Unable to find an unused IP in public_network for $node ! ${reset}"
+ exit 1
+ else
+ public_ip_arr[$node]=$next_public_ip
+ fi
+ done
+ fi
+ fi
+ ##replace global param for controllers_ip_array
+ controller_ip_array=${controller_ip_array%?}
+ sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+
+ ##now replace all the VIP variables. admin//private can be the same IP
+ ##we have to use IP's here that won't be allocated to hosts at provisioning time
+ ##therefore we increment the ip by 10 to make sure we have a safe buffer
+ next_private_ip=$(increment_ip $next_private_ip 10)
+
+ private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$private_output" ]; then
+ while read -r line; do
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$private_output"
+ fi
+
+ ##replace odl_control_ip (non-HA only)
+ odl_control_ip=${controllers_ip_arr[0]}
+ sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace controller_ip (non-HA only)
+ sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace foreman site
+ sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+ ##replace public vips
+
+ ##if single_network deployment we continue next_public_ip from next_private_ip
+ if [[ "$deployment_type" == "single_network" ]]; then
+ next_public_ip=$(next_usable_ip $next_private_ip)
+ else
+ ##no need to do this if no dhcp
+ if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ else
+ next_public_ip=$(increment_ip $next_public_ip 10)
+ fi
+ fi
+
+ public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$public_output" ]; then
+ while read -r line; do
+ if echo $line | grep horizon_public_vip; then
+ horizon_public_vip=$next_public_ip
+ fi
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$public_output"
+ fi
+
+ ##replace admin_network param for bare metal deployments
+ if [[ -z "$virtual" && "$deployment_type" != "single_network" ]]; then
+ admin_subnet=$(find_subnet $admin_ip $admin_subnet_mask)
+ sed -i 's/^.*admin_network:.*$/ admin_network:'" $admin_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ sed -i 's/^.*admin_network:.*$/ admin_network:'" \"false\""'/' opnfv_ksgen_settings.yml
+ fi
+ ##replace public_network param
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*private_network:.*$/ private_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ ##replace private_network param
+ private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
+ sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace storage_network
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ elif [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ next_storage_ip=${interface_ip_arr[3]}
+ storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_subnet param
+ public_subnet=$public_subnet'\'$public_short_subnet_mask
+ sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ ##replace private_subnet param
+ private_subnet=$private_subnet'\'$private_short_subnet_mask
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_dns param to be foreman server
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[0]}'/' opnfv_ksgen_settings.yml
+ else
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_gateway
+ if [ -z "$public_gateway" ]; then
+ if [ "$deployment_type" == "single_network" ]; then
+ public_gateway=$node_default_gw
+ else
+ ##if unset then we assume its the first IP in the public subnet
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ public_gateway=$(increment_subnet $public_subnet 1)
+ fi
+ fi
+ sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
+
+ ##we have to define an allocation range of the public subnet to give
+ ##to neutron to use as floating IPs
+ ##if static ip range, then we take the difference of the end range and current ip
+ ## to be the allocation pool
+ ##if not static ip, we will use the last 20 IP from the subnet
+ ## note that this is not a really good idea because the subnet must be at least a /27 for this to work...
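+ ##e.g. a static range ending at .250 with the current public IP at .230 yields a pool of
+ ##roughly .231-.250; without a static range, a /24 yields roughly .234-.254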
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ if [ ! -z "$static_ip_range" ]; then
+ begin_octet=$(echo $next_public_ip | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_diff=$((end_octet-begin_octet))
+ if [ $ip_diff -le 0 ]; then
+ echo "${red}ip range left for floating range is less than or equal to 0! $ipdiff ${reset}"
+ exit 1
+ else
+ public_allocation_start=$(next_ip $next_public_ip)
+ public_allocation_end=$static_ip_range_end
+ fi
+ else
+ last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+ public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+ public_allocation_end=${last_ip_subnet}
+ fi
+ echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
+
+ sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+ sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
+
+ else
+ printf '%s\n' "deploy.sh: Unknown network type: $deployment_type" >&2
+ exit 1
+ fi
+
+ echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
- if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
fi
+}
+##Configure bootstrap.sh to use the virtual Khaleesi playbook
+##params: none
+##usage: configure_virtual()
+configure_virtual() {
+ if [ $virtual ]; then
+ echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+ sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+ fi
+}
+
+##Starts Foreman VM with Vagrant
+##params: none
+##usage: start_foreman()
+start_foreman() {
+ echo "${blue}Starting Vagrant! ${reset}"
- if_counter=0
- for interface in ${output}; do
+ ##stand up vagrant
+ if ! vagrant up; then
+ printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
+ exit 1
+ else
+ echo "${blue}Foreman VM is up! ${reset}"
+ fi
+}
- if [ "$if_counter" -ge 4 ]; then
- break
+##start the VM nodes if this is a virtual installation
+##this function does nothing if baremetal servers are being used
+##params: none
+##usage: start_virtual_nodes()
+start_virtual_nodes() {
+ if [ $virtual ]; then
+
+ ##Bring up VM nodes
+ echo "${blue}Setting VMs up... ${reset}"
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ ##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
+ ##this fix assumes more than I would like, but for now it should be OK as we always have
+ ##3 static controllers
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ controller_count=0
+ compute_wait_completed=false
+
+ for node in ${nodes}; do
+
+ ##remove VM node directory in case it wasn't cleaned up
+ rm -rf $vm_dir/$node
+ rm -rf /tmp/genesis/
+
+ ##clone genesis and move into node folder
+ clone_bgs $vm_dir/$node
+
+ cd $vm_dir/$node
+
+ if [ $base_config ]; then
+ if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+ echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ exit 1
+ fi
+ fi
+
+ ##parse yaml into variables
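+ ##parse_yaml flattens the YAML into prefixed shell variables, e.g. config_nodes_<node>_type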
+ eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+ ##find node type
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+
+ ##modify memory and cpu
+ node_memory=$(eval echo \${config_nodes_${node}_memory})
+ node_vcpus=$(eval echo \${config_nodes_${node}_cpus})
+ node_storage=$(eval echo \${config_nodes_${node}_disk})
+
+ sed -i 's/^.*vb.memory =.*$/ vb.memory = '"$node_memory"'/' Vagrantfile
+ sed -i 's/^.*vb.cpus =.*$/ vb.cpus = '"$node_vcpus"'/' Vagrantfile
+
+ if ! resize_vagrant_disk $node_storage; then
+ echo "${red}Error while resizing vagrant box to size $node_storage for $node! ${reset}"
+ exit 1
+ fi
+
+ ##make compute nodes wait 20 minutes for control nodes to install first
+ if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+ echo "${blue}Waiting 20 minutes for Control nodes to install before continuing with Compute nodes...${reset}"
+ compute_wait_completed=true
+ sleep 1200
+ fi
+
+ ## Add Admin interface
+ mac_string=config_nodes_${node}_mac_address
+ mac_addr=$(eval echo \$$mac_string)
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find mac_address for $node! ${reset}"
+ exit 1
+ fi
+ this_admin_ip=${admin_ip_arr[$node]}
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+
+ ## Add private interface
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ "$node_type" == "controller" ]; then
+ new_node_ip=${controllers_ip_arr[$controller_count]}
+ if [ ! "$new_node_ip" ]; then
+ echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}"
+ exit 1
+ fi
+ ((controller_count++))
+ else
+ next_private_ip=$(next_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "{red}ERROR: Could not find private ip for $node ${reset}"
+ exit 1
+ fi
+ new_node_ip=$next_private_ip
+ fi
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ ##replace host_ip in vm_nodes_provision with private ip
+ sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+ ##replace ping site
+ if [ ! -z "$ping_site" ]; then
+ sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh
+ fi
+
+ ##find public ip info and add public interface
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ this_public_ip=${public_ip_arr[$node]}
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+ else
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ fi
+ remove_vagrant_network eth_replace3
+
+ ##modify provisioning to do puppet install, config, and foreman check-in
+ ##substitute host_name and dns_server in the provisioning script
+ host_string=config_nodes_${node}_short_name
+ short_host_name=$(eval echo \$$host_string)
+ ##substitute domain_name
+ domain_name=$config_domain_name
+ sed -i 's/^domain_name=REPLACE/domain_name='$domain_name'/' vm_nodes_provision.sh
+ host_name=${short_host_name}.${domain_name}
+ sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+ ##dns server should be the foreman server
+ sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ ## remove bootstrap and NAT provisioning
+ sed -i '/nat_setup.sh/d' Vagrantfile
+ sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ ## modify default_gw to be node_default_gw
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ echo "${blue}Starting Vagrant Node $node! ${reset}"
+ ##stand up vagrant
+ if ! vagrant up; then
+ echo "${red} Unable to start $node ${reset}"
+ exit 1
+ else
+ echo "${blue} $node VM is up! ${reset}"
+ fi
+ done
+ echo "${blue} All VMs are UP! ${reset}"
+ echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+ ##check puppet is complete
+ ##ssh into foreman server, run check to verify puppet is complete
+ pushd $vm_dir/foreman_vm
+ if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+ echo "${red} Failed to validate puppet completion on nodes ${reset}"
+ exit 1
+ else
+ echo "{$blue} Puppet complete on all nodes! ${reset}"
fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
+ popd
+ ##add routes back to nodes
+ for node in ${nodes}; do
+ pushd $vm_dir/$node
+ if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+ echo "${blue} Adding public route back to $node! ${reset}"
+ vagrant ssh -c "route add default gw $this_default_gw"
+ vagrant ssh -c "route delete default gw 10.0.2.2"
+ fi
+ popd
+ done
+ if [ ! -z "$horizon_public_vip" ]; then
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+ else
+ ##Find public IP of controller
+ for node in ${nodes}; do
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+ if [ "$node_type" == "controller" ]; then
+ pushd $vm_dir/$node
+ horizon_ip=`vagrant ssh -c "ifconfig enp0s10" | grep -Eo "inet [0-9\.]+" | awk '{print $2}'`
+ popd
+ break
+ fi
+ done
+ if [ -z "$horizon_ip" ]; then
+ echo "${red}Warn: Unable to determine horizon IP, please login to your controller node to find it${reset}"
+ fi
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_ip} ${reset}"
fi
- case "${if_counter}" in
- 0)
- mac_string=config_nodes_${node}_mac_address
- mac_addr=$(eval echo \$$mac_string)
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find mac_address for $node! ${reset}"
- exit 1
- fi
- ;;
- 1)
- if [ "$node_type" == "controller" ]; then
- mac_string=config_nodes_${node}_private_mac
- mac_addr=$(eval echo \$$mac_string)
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find private_mac for $node! ${reset}"
- exit 1
- fi
- else
- ##generate random mac
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- fi
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- *)
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- esac
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
- ((if_counter++))
- done
+ fi
+}
- ##now remove interface config in Vagrantfile for 1 node
- ##if 1, 3, or 4 interfaces set deployment type
- ##if 2 interfaces remove 2nd interface and set deployment type
- if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
- elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
- elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
+##check to make sure nodes are powered off
+##this function does nothing if virtual
+##params: none
+##usage: check_baremetal_nodes()
+check_baremetal_nodes() {
+ if [ $virtual ]; then
+ echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}"
else
- deployment_type="multi_network"
+ echo "${blue}Checking Baremetal nodes power state... ${reset}"
+ if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ echo "${blue}Installing ipmitool...${reset}"
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Failed to install ipmitool!${reset}"
+ exit 1
+ fi
+ fi
+
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \
+ | grep "System Power" | cut -d ':' -f2 | tr -d [:blank:]`
+ if [ "$ipmi_output" == "on" ]; then
+ echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}"
+ echo "${red}Please run clean.sh before running deploy! ${reset}"
+ exit 1
+ elif [ "$ipmi_output" == "off" ]; then
+ echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}"
+ else
+ echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}"
+ fi
+ done
+ else
+ echo "${red}base_config was not provided for a baremetal install! Exiting${reset}"
+ exit 1
+ fi
fi
+}
- ##modify provisioning to do puppet install, config, and foreman check-in
- ##substitute host_name and dns_server in the provisioning script
- host_string=config_nodes_${node}_hostname
- host_name=$(eval echo \$$host_string)
- sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
- ##dns server should be the foreman server
- sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+##resizes vagrant disk (cannot shrink)
+##params: size in GB
+##usage: resize_vagrant_disk 100
+resize_vagrant_disk() {
+ if [[ "$1" < 40 ]]; then
+ echo "${blue}Warn: Requested disk size cannot be less than 40, using 40 as new size${reset}"
+ new_size_gb=40
+ else
+ new_size_gb=$1
+ fi
- ## remove bootstrap and NAT provisioning
- sed -i '/nat_setup.sh/d' Vagrantfile
- sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ if ! vagrant box list | grep opnfv; then
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ fi
- ## modify default_gw to be node_default_gw
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ pushd $vagrant_box_dir
+
+ # Close medium to make sure we can modify it
+ vboxmanage closemedium disk $vagrant_box_vmdk
+
+ cur_size=$(vboxmanage showhdinfo $vagrant_box_vmdk | grep -i capacity | grep -Eo [0-9]+)
+ cur_size_gb=$((cur_size / 1024))
+
+ if [ "$cur_size_gb" -eq "$new_size_gb" ]; then
+ echo "${blue}Info: Disk size already ${cur_size_gb} ${reset}"
+ popd
+ return
+ elif [[ "$new_size_gb" < "$cur_size_gb" ]] ; then
+ echo "${blue}Info: Requested disk is less than ${cur_size_gb} ${reset}"
+ echo "${blue}Re-adding vagrant box${reset}"
+ if vagrant box list | grep opnfv; then
+ popd
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ pushd $vagrant_box_dir
+ fi
+ fi
- ## modify VM memory to be 4gig
- sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+ new_size=$((new_size_gb * 1024))
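+ # VirtualBox cannot resize VMDK images in place, so clone to VDI, resize, then clone back to VMDK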
+ if ! vboxmanage clonehd $vagrant_box_vmdk tmp-disk.vdi --format vdi; then
+ echo "${red}Error: Unable to clone ${vagrant_box_vmdk}${reset}"
+ popd
+ return 1
+ fi
- echo "${blue}Starting Vagrant Node $node! ${reset}"
+ if ! vboxmanage modifyhd tmp-disk.vdi --resize $new_size; then
+ echo "${red}Error: Unable modify tmp-disk.vdi to ${new_size}${reset}"
+ popd
+ return 1
+ fi
- ##stand up vagrant
- if ! vagrant up; then
- echo "${red} Unable to start $node ${reset}"
- exit 1
- else
- echo "${blue} $node VM is up! ${reset}"
+ if ! vboxmanage clonehd tmp-disk.vdi resized-disk.vmdk --format vmdk; then
+ echo "${red}Error: Unable clone tmp-disk.vdi to vmdk${reset}"
+ popd
+ return 1
fi
-done
+ vboxmanage closemedium disk tmp-disk.vdi --delete
+ rm -f tmp-disk.vdi $vagrant_box_vmdk
+ cp -f resized-disk.vmdk $vagrant_box_vmdk
+ vboxmanage closemedium disk resized-disk.vmdk --delete
+ popd
+}
+
+##END FUNCTIONS
- echo "${blue} All VMs are UP! ${reset}"
+main() {
+ parse_cmdline "$@"
+ disable_selinux
+ check_baremetal_nodes
+ install_EPEL
+ install_vbox
+ install_ansible
+ install_vagrant
+ clean_tmp
+ verify_vm_dir
+ clone_bgs $vm_dir/foreman_vm
+ configure_network
+ configure_virtual
+ start_foreman
+ start_virtual_nodes
+}
-fi
+main "$@"
diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
index 72935c9ad..2c146a07a 100644
--- a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
+++ b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
@@ -105,9 +105,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:5e"
- bmc_ip: 172.30.8.74
- bmc_mac: "74:a2:e6:a4:14:9c"
+ mac_address: "00:25:B5:A0:00:2A"
+ bmc_ip: 172.30.8.75
+ bmc_mac: "a8:9d:21:c9:8b:56"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -125,9 +125,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:3e"
- bmc_ip: 172.30.8.73
- bmc_mac: "a8:9d:21:a0:15:9c"
+ mac_address: "00:25:B5:A0:00:3A"
+ bmc_ip: 172.30.8.65
+ bmc_mac: "a8:9d:21:c9:4d:26"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -145,13 +145,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network_ODL
- mac_address: "00:25:b5:a0:00:af"
- bmc_ip: 172.30.8.66
- bmc_mac: "a8:9d:21:c9:8b:56"
+ mac_address: "00:25:B5:A0:00:4A"
+ bmc_ip: 172.30.8.74
+ bmc_mac: "a8:9d:21:c9:3a:92"
bmc_user: admin
bmc_pass: octopus
private_ip: controller1_private
- private_mac: "00:25:b5:b0:00:1f"
+ private_mac: "00:25:B5:A0:00:4B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -167,13 +167,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:9e"
- bmc_ip: 172.30.8.75
- bmc_mac: "a8:9d:21:c9:4d:26"
+ mac_address: "00:25:B5:A0:00:5A"
+ bmc_ip: 172.30.8.73
+ bmc_mac: "74:a2:e6:a4:14:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller2_private
- private_mac: "00:25:b5:b0:00:de"
+ private_mac: "00:25:B5:A0:00:5B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -189,13 +189,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:7e"
- bmc_ip: 172.30.8.65
- bmc_mac: "a8:9d:21:c9:3a:92"
+ mac_address: "00:25:B5:A0:00:6A"
+ bmc_ip: 172.30.8.72
+ bmc_mac: "a8:9d:21:a0:15:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller3_private
- private_mac: "00:25:b5:b0:00:be"
+ private_mac: "00:25:B5:A0:00:6B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
index 21840ddf8..28596163a 100644
--- a/foreman/ci/opnfv_ksgen_settings.yml
+++ b/foreman/ci/opnfv_ksgen_settings.yml
@@ -7,6 +7,7 @@ global_params:
controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
controllers_ip_array:
amqp_vip:
+ admin_network:
private_subnet:
cinder_admin_vip:
cinder_private_vip:
@@ -44,6 +45,8 @@ global_params:
deployment_type:
network_type: multi_network
default_gw:
+no_dhcp: false
+domain_name: opnfv.com
foreman:
seed_values:
- { name: heat_cfn, oldvalue: true, newvalue: false }
@@ -99,8 +102,8 @@ workaround_vif_plugging: false
openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
nodes:
compute:
- name: oscompute11.opnfv.com
- hostname: oscompute11.opnfv.com
+ name: oscompute11.{{ domain_name }}
+ hostname: oscompute11.{{ domain_name }}
short_name: oscompute11
type: compute
host_type: baremetal
@@ -110,8 +113,12 @@ nodes:
bmc_mac: "10:23:45:67:88:AB"
bmc_user: root
bmc_pass: root
+ admin_ip: compute_admin
ansible_ssh_pass: "Op3nStack"
admin_password: ""
+ cpus: 2
+ memory: 2048
+ disk: 40
groups:
- compute
- foreman_nodes
@@ -119,8 +126,8 @@ nodes:
- rdo
- neutron
controller1:
- name: oscontroller1.opnfv.com
- hostname: oscontroller1.opnfv.com
+ name: oscontroller1.{{ domain_name }}
+ hostname: oscontroller1.{{ domain_name }}
short_name: oscontroller1
type: controller
host_type: baremetal
@@ -130,10 +137,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AC"
bmc_user: root
bmc_pass: root
+ admin_ip: controller1_admin
private_ip: controller1_private
private_mac: "10:23:45:67:87:AC"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
@@ -141,8 +152,8 @@ nodes:
- rdo
- neutron
controller2:
- name: oscontroller2.opnfv.com
- hostname: oscontroller2.opnfv.com
+ name: oscontroller2.{{ domain_name }}
+ hostname: oscontroller2.{{ domain_name }}
short_name: oscontroller2
type: controller
host_type: baremetal
@@ -152,10 +163,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AD"
bmc_user: root
bmc_pass: root
+ admin_ip: controller2_admin
private_ip: controller2_private
private_mac: "10:23:45:67:87:AD"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
@@ -163,8 +178,8 @@ nodes:
- rdo
- neutron
controller3:
- name: oscontroller3.opnfv.com
- hostname: oscontroller3.opnfv.com
+ name: oscontroller3.{{ domain_name }}
+ hostname: oscontroller3.{{ domain_name }}
short_name: oscontroller3
type: controller
host_type: baremetal
@@ -174,10 +189,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AE"
bmc_user: root
bmc_pass: root
+ admin_ip: controller3_admin
private_ip: controller3_private
private_mac: "10:23:45:67:87:AE"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
new file mode 100644
index 000000000..306603826
--- /dev/null
+++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
@@ -0,0 +1,272 @@
+global_params:
+ admin_email: opnfv@opnfv.com
+ ha_flag: "false"
+ odl_flag: "true"
+ odl_control_ip:
+ admin_network:
+ private_network:
+ storage_network:
+ public_network:
+ private_subnet:
+ deployment_type:
+ controller_ip:
+network_type: multi_network
+default_gw:
+no_dhcp: false
+domain_name: opnfv.com
+foreman:
+ seed_values:
+ - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+ name: puppet
+ short_name: pupt
+ network:
+ auto_assign_floating_ip: false
+ variant:
+ short_name: m2vx
+ plugin:
+ name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+ repo:
+ Fedora:
+ '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+ '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+ RedHat:
+ '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+ use_virtual_env: false
+ public_allocation_end: 10.2.84.71
+ skip:
+ files: null
+ tests: null
+ public_allocation_start: 10.2.84.51
+ physnet: physnet1
+ use_custom_repo: false
+ public_subnet_cidr: 10.2.84.0/24
+ public_subnet_gateway: 10.2.84.1
+ additional_default_settings:
+ - section: compute
+ option: flavor_ref
+ value: 1
+ cirros_image_file: cirros-0.3.1-x86_64-disk.img
+ setup_method: tempest/rpm
+ test_name: all
+ rdo:
+ version: juno
+ rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ rpm:
+ version: 20141201
+ dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+ node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+ anchors:
+ - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+ compute:
+ name: oscompute11.{{ domain_name }}
+ hostname: oscompute11.{{ domain_name }}
+ short_name: oscompute11
+ type: compute
+ host_type: baremetal
+ hostgroup: Compute
+ mac_address: "10:23:45:67:89:AB"
+ bmc_ip: 10.4.17.2
+ bmc_mac: "10:23:45:67:88:AB"
+ bmc_user: root
+ bmc_pass: root
+ admin_ip: compute_admin
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: ""
+ cpus: 2
+ memory: 2048
+ disk: 40
+ groups:
+ - compute
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+ controller1:
+ name: oscontroller1.{{ domain_name }}
+ hostname: oscontroller1.{{ domain_name }}
+ short_name: oscontroller1
+ type: controller
+ host_type: baremetal
+ hostgroup: Controller_Network_ODL
+ mac_address: "10:23:45:67:89:AC"
+ bmc_ip: 10.4.17.3
+ bmc_mac: "10:23:45:67:88:AC"
+ bmc_user: root
+ bmc_pass: root
+ private_ip: controller1_private
+ admin_ip: controller1_admin
+ private_mac: "10:23:45:67:87:AC"
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
+ groups:
+ - controller
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+workaround_mysql_centos7: true
+distro:
+ name: centos
+ centos:
+ '7.0':
+ repos: []
+ short_name: c
+ short_version: 70
+ version: '7.0'
+ rhel:
+ '7.0':
+ kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+ repos:
+ - section: rhel7-server-rpms
+ name: Packages for RHEL 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-update-rpms
+ name: Update Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+ gpgcheck: 0
+ - section: rhel-7-server-extras-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+ gpgcheck: 0
+ '6.5':
+ kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+ repos:
+ - section: rhel6.5-server-rpms
+ name: Packages for RHEL 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+ gpgcheck: 0
+ - section: rhel-6.5-server-update-rpms
+ name: Update Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+ gpgcheck: 0
+ - section: rhel-6.5-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+ gpgcheck: 0
+ - section: rhel6.5-server-rpms-32bit
+ name: Packages for RHEL 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-update-rpms-32bit
+ name: Update Packages for Enterprise Linux 6.5 - i686
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-optional-rpms-32bit
+ name: Optional Packages for Enterprise Linux 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+ gpgcheck: 0
+ enabled: 1
+ subscription:
+ username: REPLACE_ME
+ password: HWj8TE28Qi0eP2c
+ pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+ config:
+ selinux: permissive
+ ntp_server: 0.pool.ntp.org
+ dns_servers:
+ - 10.4.1.1
+ - 10.4.0.2
+ reboot_delay: 1
+ initial_boot_timeout: 180
+node:
+ prefix:
+ - rdo
+ - pupt
+ - ffqiotcxz1
+ - null
+product:
+ repo_type: production
+ name: rdo
+ short_name: rdo
+ rpm:
+ CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ short_version: ju
+ repo:
+ production:
+ CentOS:
+ 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ Fedora:
+ '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+ '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+ RedHat:
+ '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ version: juno
+ config:
+ enable_epel: y
+ short_repo: prod
+tester:
+ name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+ verbosity: 1
+ archive:
+ - '{{ tempest.dir }}/etc/tempest.conf'
+ - '{{ tempest.dir }}/etc/tempest.conf.sample'
+ - '{{ tempest.dir }}/*.log'
+ - '{{ tempest.dir }}/*.xml'
+ - /root/
+ - /var/log/
+ - /etc/nova
+ - /etc/ceilometer
+ - /etc/cinder
+ - /etc/glance
+ - /etc/keystone
+ - /etc/neutron
+ - /etc/ntp
+ - /etc/puppet
+ - /etc/qpid
+ - /etc/qpidd.conf
+ - /root
+ - /etc/yum.repos.d
+topology:
+ name: multinode
+ short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+ debug: 0
+ info: 1
+ warning: 2
+ warn: 2
+ errors: 3
+provisioner:
+ username: admin
+ network:
+ type: nova
+ name: external
+ skip: skip_provision
+ foreman_url: https://10.2.84.2/api/v2/
+ password: octopus
+ type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+ enabled: true
+
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
index 9e3d053b5..9b3a4d4bb 100644
--- a/foreman/ci/reload_playbook.yml
+++ b/foreman/ci/reload_playbook.yml
@@ -14,3 +14,4 @@
delay=60
timeout=180
sudo: false
+ - pause: minutes=1
diff --git a/foreman/ci/resize_lvm.sh b/foreman/ci/resize_lvm.sh
new file mode 100755
index 000000000..64a9c6252
--- /dev/null
+++ b/foreman/ci/resize_lvm.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+#script for resizing volumes in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses resize_partition.sh
+#
+#Prerequisites:
+#Vagrant box disk size already resized
+#Partition already resized
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+echo "${blue}Resizing physical volume${reset}"
+if ! pvresize /dev/sda2; then
+ echo "${red}Unable to resize physical volume${reset}"
+ exit 1
+else
+ new_part_size=`pvdisplay | grep -Eo "PV Size\s*[0-9]+\." | awk '{print $3}' | tr -d .`
+ echo "${blue}New physical volume size: ${new_part_size}${reset}"
+fi
+
+echo "${blue}Resizing logical volume${reset}"
+if ! lvextend /dev/mapper/centos-root -r -l +100%FREE; then
+ echo "${red}Unable to resize logical volume${reset}"
+ exit 1
+else
+ new_fs_size=`df -h | grep centos-root | awk '{print $2}'`
+ echo "${blue}Filesystem resized to: ${new_fs_size}${reset}"
+fi
diff --git a/foreman/ci/resize_partition.sh b/foreman/ci/resize_partition.sh
new file mode 100755
index 000000000..4c5581dd2
--- /dev/null
+++ b/foreman/ci/resize_partition.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+#script for extending disk partition in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#VagrantFile uses resize_partition.sh
+#
+#Prerequisites:
+#Vagrant box disk size already resized
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+echo "${blue}Extending partition...${reset}"
+echo "d
+2
+n
+p
+
+
+
+p
+t
+2
+8e
+w
+"|fdisk /dev/sda; true
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
index d0bba6452..ef2b325ce 100755
--- a/foreman/ci/vm_nodes_provision.sh
+++ b/foreman/ci/vm_nodes_provision.sh
@@ -18,6 +18,8 @@ green=`tput setaf 2`
host_name=REPLACE
dns_server=REPLACE
+host_ip=REPLACE
+domain_name=REPLACE
##END VARS
##set hostname
@@ -31,27 +33,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
systemctl restart NetworkManager
fi
-if ! ping www.google.com -c 5; then
+##modify /etc/resolv.conf to point to foreman
+echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}"
+cat > /etc/resolv.conf << EOF
+search $domain_name
+nameserver $dns_server
+nameserver 8.8.8.8
+
+EOF
+
+##modify /etc/hosts to add own IP for rabbitmq workaround
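+##(rabbitmq expects the short hostname to resolve to a routable address rather than loopback)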
+host_short_name=`echo $host_name | cut -d . -f 1`
+echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}"
+cat > /etc/hosts << EOF
+$host_ip $host_short_name $host_name
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+EOF
+
+if ! ping www.google.com -c 5; then
echo "${red} No internet connection, check your route and DNS setup ${reset}"
exit 1
fi
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
+##install EPEL
+if ! yum repolist | grep "epel/"; then
+ if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to configure EPEL repo' >&2
+ exit 1
+ fi
+else
+ printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.'
+fi
-# Update device-mapper-libs, needed for libvirtd on compute nodes
-# Major version is pinned to force some consistency for Arno
-if ! yum -y upgrade device-mapper-libs-1*; then
+##install device-mapper-libs
+##needed for libvirtd on compute nodes
+if ! yum -y upgrade device-mapper-libs; then
echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
fi
-# Install other required packages
-# Major version is pinned to force some consistency for Arno
echo "${blue} Installing Puppet ${reset}"
-if ! yum install -y puppet-3*; then
- printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
- exit 1
+##install puppet
+if ! yum list installed | grep -i puppet; then
+ if ! yum -y install puppet; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2
+ exit 1
+ fi
fi
echo "${blue} Configuring puppet ${reset}"
@@ -68,10 +95,10 @@ pluginsync = true
report = true
ignoreschedules = true
daemon = false
-ca_server = foreman-server.opnfv.com
+ca_server = foreman-server.$domain_name
certname = $host_name
environment = production
-server = foreman-server.opnfv.com
+server = foreman-server.$domain_name
runinterval = 600
EOF
@@ -79,13 +106,13 @@ EOF
# Setup puppet to run on system reboot
/sbin/chkconfig --level 345 puppet on
-/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.opnfv.com --no-daemonize
+/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.$domain_name --no-daemonize
sync
# Inform the build system that we are done.
echo "Informing Foreman that we are built"
-wget -q -O /dev/null --no-check-certificate http://foreman-server.opnfv.com:80/unattended/built
+wget -q -O /dev/null --no-check-certificate http://foreman-server.$domain_name:80/unattended/built
echo "Starting puppet"
systemctl start puppet
diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst
index 2ac872d13..73b900e58 100644
--- a/foreman/docs/src/installation-instructions.rst
+++ b/foreman/docs/src/installation-instructions.rst
@@ -1,6 +1,6 @@
-=======================================================================================================
-OPNFV Installation instructions for the Arno release of OPNFV when using Foreman as a deployment tool
-=======================================================================================================
+=========================================================================================================
+OPNFV Installation Instructions for the Arno SR1 Release of OPNFV when using Foreman as a deployment tool
+=========================================================================================================
.. contents:: Table of Contents
@@ -10,15 +10,19 @@ OPNFV Installation instructions for the Arno release of OPNFV when using Foreman
Abstract
========
-This document describes how to install the Arno release of OPNFV when using Foreman/Quickstack as a deployment tool covering it's limitations, dependencies and required system resources.
+This document describes how to install the Arno SR1 release of OPNFV when using Foreman/QuickStack
+as a deployment tool, covering its limitations, dependencies and required system resources.
License
=======
-Arno release of OPNFV when using Foreman as a deployment tool Docs (c) by Tim Rozet (RedHat)
+Arno SR1 release of OPNFV when using Foreman as a deployment tool Docs (c) by Tim Rozet (RedHat)
-Arno release of OPNFV when using Foreman as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno SR1 release of OPNFV when using Foreman as a deployment tool Docs are licensed under a Creative
+Commons Attribution 4.0 International License. You should have received a copy of the license along
+with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
-Version history
+Version History
===================
+--------------------+--------------------+--------------------+--------------------+
@@ -37,25 +41,43 @@ Version history
| 2015-06-03 | 0.0.4 | Ildiko Vancsa | Minor changes |
| | | (Ericsson) | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-09-10 | 0.2.0 | Tim Rozet | Update to SR1 |
+| | | (Red Hat) | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-09-25 | 0.2.1 | Randy Levensalor | Added CLI |
+| | | (CableLabs) | verification |
++--------------------+--------------------+--------------------+--------------------+
Introduction
============
-This document describes the steps to install an OPNFV Arno reference platform, as defined by the Bootstrap/Getting-Started (BGS) Project using the Foreman/QuickStack installer.
+This document describes the steps to install an OPNFV Arno SR1 reference platform, as defined by the
+Bootstrap/Getting-Started (BGS) Project using the Foreman/QuickStack installer.
The audience is assumed to have a good background in networking and Linux administration.
Preface
=======
-Foreman/QuickStack uses the Foreman Open Source project as a server management tool, which in turn manages and executes Genesis/QuickStack. Genesis/QuickStack consists of layers of Puppet modules that are capable of provisioning the OPNFV Target System (3 controllers, n number of compute nodes).
+Foreman/QuickStack uses the Foreman Open Source project as a server management tool, which in turn
+manages and executes Genesis/QuickStack. Genesis/QuickStack consists of layers of Puppet modules that
+are capable of provisioning the OPNFV Target System (3 controllers, n number of compute nodes).
-The Genesis repo contains the necessary tools to get install and deploy an OPNFV target system using Foreman/QuickStack. These tools consist of the Foreman/QuickStack bootable ISO (``arno.2015.1.0.foreman.iso``), and the automatic deployment script (``deploy.sh``).
+The Genesis repo contains the necessary tools to install and deploy an OPNFV target system using
+Foreman/QuickStack. These tools consist of the Foreman/QuickStack bootable ISO
+(``arno.2015.2.0.foreman.iso``), and the automatic deployment script (``deploy.sh``).
-An OPNFV install requires a "Jumphost" in order to operate. The bootable ISO will allow you to install a customized CentOS 7 release to the Jumphost, which then gives you the required packages needed to run ``deploy.sh``. If you already have a Jumphost with CentOS 7 installed, you may choose to ignore the ISO step and instead move directly to running ``deploy.sh``. In this case, ``deploy.sh`` will install the necessary packages for you in order to execute.
+An OPNFV install requires a "Jumphost" in order to operate. The bootable ISO will allow you to
+install
+a customized CentOS 7 release to the Jumphost, which then gives you the required packages needed to
+run ``deploy.sh``. If you already have a Jumphost with CentOS 7 installed, you may choose to ignore
+the ISO step and instead move directly to cloning the git repository and running ``deploy.sh``. In
+this case, ``deploy.sh`` will install the necessary packages for you in order to execute.
-``deploy.sh`` installs Foreman/QuickStack VM server using Vagrant with VirtualBox as its provider. This VM is then used to provision the OPNFV target system (3 controllers, n compute nodes). These nodes can be either virtual or bare metal. This guide contains instructions for installing both.
+``deploy.sh`` installs Foreman/QuickStack VM server using Vagrant with VirtualBox as its provider.
+This VM is then used to provision the OPNFV target system (3 controllers, n compute nodes). These
+nodes can be either virtual or bare metal. This guide contains instructions for installing both.
Setup Requirements
==================
@@ -71,26 +93,32 @@ The Jumphost requirements are outlined below:
3. libvirt or other hypervisors disabled (no kernel modules loaded).
-4. 3-4 NICs, untagged (no 802.1Q tagging), with IP addresses.
+4. 3-4 NICs for bare metal deployment (only 1 NIC required for virtual deployment), untagged
+ (no 802.1Q tagging), with IP addresses.
5. Internet access for downloading packages, with a default gateway configured.
-6. 4 GB of RAM for a bare metal deployment, 24 GB of RAM for a VM deployment.
+6. 4 GB of RAM for a bare metal deployment, 18 GB (HA) or 8 GB (non-HA) of RAM for a VM
+ deployment.
Network Requirements
--------------------
Network requirements include:
-1. No DHCP or TFTP server running on networks used by OPNFV.
+1. No DHCP or TFTP server running on networks used by OPNFV (bare metal deployment only).
-2. 3-4 separate VLANs (untagged) with connectivity between Jumphost and nodes (bare metal deployment only). These make up the admin, private, public and optional storage networks.
+2. 1, 3, or 4 separate VLANs (untagged) with connectivity between Jumphost and nodes (bare metal
+ deployment only). These make up the admin, private, public and optional storage networks. If
+ only 1 VLAN network is used for bare metal, then all of the previously listed logical networks
+ will be consolidated onto that single network.
-3. Lights out OOB network access from Jumphost with IPMI node enabled (bare metal deployment only).
+3. Lights out OOB network access from Jumphost with IPMI node enabled (bare metal deployment
+ only).
4. Admin or public network has Internet access, meaning a gateway and DNS availability.
-*Note: Storage network will be consolidated to the private network if only 3 networks are used.*
+**Note: Storage network will be consolidated to the private network if only 3 networks are used.**
Bare Metal Node Requirements
----------------------------
@@ -116,46 +144,85 @@ In order to execute a deployment, one must gather the following information:
4. MAC address of private interfaces on 3 nodes that will be controllers.
+**Note: For single NIC/network bare metal deployment, the MAC addresses of the admin and private
+interfaces will be the same.**
Installation High-Level Overview - Bare Metal Deployment
========================================================
-The setup presumes that you have 6 bare metal servers and have already setup connectivity on at least 3 interfaces for all servers via a TOR switch or other network implementation.
+The setup presumes that you have 6 bare metal servers and have already set up connectivity on at least
+1 or 3 interfaces for all servers via a TOR switch or other network implementation.
-The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All the networks involved in the OPNFV infrastructure as well as the provider networks and the private tenant VLANs needs to be manually configured.
+The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All
+the networks involved in the OPNFV infrastructure as well as the provider networks and the private
+tenant VLANs need to be manually configured.
-The Jumphost can be installed using the bootable ISO. The Jumphost should then be configured with an IP gateway on its admin or public interface and configured with a working DNS server. The Jumphost should also have routable access to the lights out network.
+The Jumphost can be installed using the bootable ISO. The Jumphost should then be configured with an
+IP gateway on its admin or public interface and configured with a working DNS server. The Jumphost
+should also have routable access to the lights out network.
-``deploy.sh`` is then executed in order to install the Foreman/QuickStack Vagrant VM. ``deploy.sh`` uses a configuration file with YAML format in order to know how to install and provision the OPNFV target system. The information gathered under section `Execution Requirements (Bare Metal Only)`_ is put into this configuration file.
+``deploy.sh`` is then executed in order to install the Foreman/QuickStack Vagrant VM. ``deploy.sh``
+uses a configuration file with YAML format in order to know how to install and provision the OPNFV
+target system. The information gathered under section `Execution Requirements (Bare Metal Only)`_
+is put into this configuration file.
-``deploy.sh`` brings up a CentOS 7 Vagrant VM, provided by VirtualBox. The VM then executes an Ansible project called Khaleesi in order to install Foreman and QuickStack. Once the Foreman/QuickStack VM is up, Foreman will be configured with the nodes' information. This includes MAC address, IPMI, OpenStack type (controller, compute, OpenDaylight controller) and other information. At this point Khaleesi makes a REST API call to Foreman to instruct it to provision the hardware.
+``deploy.sh`` brings up a CentOS 7 Vagrant VM, provided by VirtualBox. The VM then executes an
+Ansible project called Khaleesi in order to install Foreman and QuickStack. Once the
+Foreman/QuickStack VM is up, Foreman will be configured with the nodes' information. This includes
+MAC address, IPMI, OpenStack type (controller, compute, OpenDaylight controller) and other
+information.
+At this point Khaleesi makes a REST API call to Foreman to instruct it to provision the hardware.
-Foreman will then reboot the nodes via IPMI. The nodes should already be set to PXE boot first off the admin interface. Foreman will then allow the nodes to PXE and install CentOS 7 as well as Puppet. Foreman/QuickStack VM server runs a Puppet Master and the nodes query this master to get their appropriate OPNFV configuration. The nodes will then reboot one more time and once back up, will DHCP on their private, public and storage NICs to gain IP addresses. The nodes will now check in via Puppet and start installing OPNFV.
+Foreman will then reboot the nodes via IPMI. The nodes should already be set to PXE boot first off
+the admin interface. Foreman will then allow the nodes to PXE and install CentOS 7 as well as Puppet.
+Foreman/QuickStack VM server runs a Puppet Master and the nodes query this master to get their
+appropriate OPNFV configuration. The nodes will then reboot one more time and once back up, will DHCP
+on their private, public and storage NICs to gain IP addresses. The nodes will now check in via
+Puppet and start installing OPNFV.
-Khaleesi will wait until these nodes are fully provisioned and then return a success or failure based on the outcome of the Puppet application.
+Khaleesi will wait until these nodes are fully provisioned and then return a success or failure based
+on the outcome of the Puppet application.
Installation High-Level Overview - VM Deployment
================================================
-The VM nodes deployment operates almost the same way as the bare metal deployment with a few differences. ``deploy.sh`` still installs Foreman/QuickStack VM the exact same way, however the part of the Khaleesi Ansible playbook which IPMI reboots/PXE boots the servers is ignored. Instead, ``deploy.sh`` brings up N number more Vagrant VMs (where N is 3 control nodes + n compute). These VMs already come up with CentOS 7 so instead of re-provisioning the entire VM, ``deploy.sh`` initiates a small Bash script that will signal to Foreman that those nodes are built and install/configure Puppet on them.
+The VM nodes deployment operates almost the same way as the bare metal deployment with a few
+differences. ``deploy.sh`` still installs Foreman/QuickStack VM the exact same way, however the part
+of the Khaleesi Ansible playbook which IPMI reboots/PXE boots the servers is ignored. Instead,
+``deploy.sh`` brings up N number more Vagrant VMs (where N is 3 control nodes + n compute). These VMs
+already come up with CentOS 7 so instead of re-provisioning the entire VM, ``deploy.sh`` initiates a
+small Bash script that will signal to Foreman that those nodes are built and install/configure Puppet
+on them.
To Foreman these nodes look like they have just built and register the same way as bare metal nodes.
+VM deployment will automatically use the default gateway interface on the host for all of the VMs'
+internet access by bridging the VMs' NICs (public network). The other networks - such as admin,
+private and storage - will all be created as internal VirtualBox networks. Therefore only a single
+interface on the host is needed for VM deployment.
+
Installation Guide - Bare Metal Deployment
==========================================
-This section goes step-by-step on how to correctly install and provision the OPNFV target system to bare metal nodes.
+This section goes step-by-step on how to correctly install and provision the OPNFV target system to
+bare metal nodes.
Install Bare Metal Jumphost
---------------------------
-1. If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh install, then download the Foreman/QuickStack bootable ISO <http://artifacts.opnfv.org/arno.2015.1.0/foreman/arno.2015.1.0.foreman.iso> here.
+1. If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh install,
+ then download the Foreman/QuickStack bootable ISO
+ `here <http://artifacts.opnfv.org/arno.2015.2.0/foreman/arno.2015.2.0.foreman.iso>`_. If you
+ already have a CentOS 7 install that you would like to use then go to step 3.
2. Boot the ISO off of a USB or other installation media and walk through installing OPNFV CentOS 7.
-3. After OS is installed login to your Jumphost as root.
+3. After the OS is installed, log in to your Jumphost as root. If /root/genesis does not exist then
+ ``git clone -b arno.2015.2.0 https://gerrit.opnfv.org/gerrit/genesis /root/genesis``
-4. Configure IP addresses on 3-4 interfaces that you have selected as your admin, private, public, and storage (optional) networks.
+4. Configure IP addresses on 3-4 interfaces that you have selected as your admin, private, public,
+ and storage (optional) networks.
 5. Configure the IP gateway to the Internet, preferably on the public interface.
@@ -174,15 +241,26 @@ Install Bare Metal Jumphost
Creating an Inventory File
--------------------------
-You now need to take the MAC address/IPMI info gathered in section `Execution Requirements (Bare Metal Only)`_ and create the YAML inventory (also known as configuration) file for ``deploy.sh``.
+You now need to take the MAC address/IPMI info gathered in section
+`Execution Requirements (Bare Metal Only)`_ and create the YAML inventory (also known as
+configuration) file for ``deploy.sh``.
-1. Copy the ``opnfv_ksgen_settings.yml`` file from ``/root/bgs_vagrant/`` to another directory and rename it to be what you want EX: ``/root/my_ksgen_settings.yml``
+1. Copy the ``opnfv_ksgen_settings.yml`` file (for HA) or ``opnfv_ksgen_settings_no_HA.yml`` from
+ ``/root/genesis/foreman/ci/`` to another directory and rename it to whatever you want, for example:
+ ``/root/my_ksgen_settings.yml``
-2. Edit the file in your favorite editor. There is a lot of information in this file, but you really only need to be concerned with the "nodes:" dictionary.
+2. Edit the file in your favorite editor. There is a lot of information in this file, but you
+ really only need to be concerned with the "nodes:" dictionary.
-3. The nodes dictionary contains each bare metal host you want to deploy. You can have 1 or more compute nodes and must have 3 controller nodes (these are already defined for you). It is optional at this point to add more compute nodes into the dictionary. You must use a different name, hostname, short_name and dictionary keyname for each node.
+3. The nodes dictionary contains each bare metal host you want to deploy. You can have 1 or more
+ compute nodes and must have 3 controller nodes (these are already defined for you) if ha_flag is
+ set to true. If ha_flag is set to false, please only define 1 controller node. It is optional at
+ this point to add more compute nodes into the dictionary. You must use a different name,
+ hostname, short_name and dictionary keyname for each node.
-4. Once you have decided on your node definitions you now need to modify the MAC address/IPMI info dependent on your hardware. Edit the following values for each node:
+4. Once you have decided on your node definitions you now need to modify the MAC address/IPMI
+   info dependent on your hardware. Edit the following values for each node (an example node
+   entry is sketched after this list):
- ``mac_address``: change to MAC address of that node's admin NIC (defaults to 1st NIC)
- ``bmc_ip``: change to IP Address of BMC (out-of-band)/IPMI IP
@@ -194,56 +272,79 @@ You now need to take the MAC address/IPMI info gathered in section `Execution Re
- ``private_mac`` - change to MAC address of node's private NIC (default to 2nd NIC)
-6. Save your changes.
+6. You may also define a unique domain name by editing the ``domain_name`` global parameter.
+
+7. Save your changes.
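+
+**Note: An illustrative node entry is sketched below. This is only a shape reminder, under the
+assumption that your copied opnfv_ksgen_settings file uses these key names; always start from the
+provided file rather than typing an entry from scratch.**
+
+  ::
+
+    nodes:
+      compute:                             # unique dictionary keyname
+        name: compute                      # unique name
+        hostname: compute1.example.com     # unique hostname (placeholder)
+        short_name: compute1               # unique short_name
+        mac_address: "52:54:00:aa:bb:cc"   # admin NIC MAC (defaults to 1st NIC)
+        private_mac: "52:54:00:dd:ee:ff"   # private NIC MAC (defaults to 2nd NIC)
+        bmc_ip: 192.168.20.21              # out-of-band/IPMI IP of the BMC
+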
Running ``deploy.sh``
---------------------
-You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will be running out of ``/tmp/bgs_vagrant``.
+You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/var/opt/opnfv/`` directory to store
+its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will be running out of
+``/var/opt/opnfv/foreman_vm/``.
-It is also recommended that you power off your nodes before running ``deploy.sh`` If there are DHCP servers or other network services that are on those nodes it may conflict with the installation.
+It is also recommended that you power off your nodes before running ``deploy.sh``. If there are
+DHCP servers or other network services running on those nodes, they may conflict with the
+installation.
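+
+**Note: One out-of-band way to power nodes off is ipmitool, assuming it is installed on your
+Jumphost; use the BMC IP and credentials you gathered for your inventory file. This is only a
+sketch, not a required step:**
+
+  ::
+
+    # Illustrative only -- substitute each node's BMC address and credentials
+    ipmitool -I lanplus -H <bmc_ip> -U <bmc_user> -P <bmc_password> chassis power off
+    # Confirm the node is off
+    ipmitool -I lanplus -H <bmc_ip> -U <bmc_user> -P <bmc_password> chassis power status
+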
Follow the steps below to execute:
-1. ``cd /root/bgs_vagrant``
+1. ``cd /root/genesis/foreman/ci/``
-2. ``./deploy.sh -base_config </root/my_ksgen_settings.yml>``
+2. ``./deploy.sh -base_config /root/my_ksgen_settings.yml``
-3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost. You will also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:" this is Khalessi running and installing Foreman/QuickStack inside of your VM or deploying your nodes. Look for "PLAY [Deploy Nodes]" as a sign that Foreman/QuickStack is finished installing and now your nodes are being rebuilt.
+**Note: By default, deploy.sh expects at least 3 VLANs/interfaces configured on your jumphost and
+assigns them to the logical networks in NIC order (1st Admin, 2nd Private, 3rd Public). If you
+wish to use a single interface for a bare metal install, see the help output for
+"-single_baremetal_nic". If you would like to specify the NIC mapping to logical networks, see
+the help output for "-admin_nic", "-private_nic", "-public_nic" and "-storage_nic".**
-4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When complete you will see "Finished: SUCCESS"
+3. It will take about 20-25 minutes to install the Foreman/QuickStack VM. If something goes wrong
+   during this part of the process, it is most likely a problem with the setup of your Jumphost.
+   You will also notice different outputs in your shell. Messages that say "TASK:" or "PLAY:" are
+   Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your nodes.
+   Look for "PLAY [Deploy Nodes]" as a sign that Foreman/QuickStack is finished installing and
+   your nodes are now being rebuilt.
-.. _setup_verify:
+4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When
+ complete you will see "Finished: SUCCESS"
Verifying the Setup
-------------------
-Now that the installer has finished it is a good idea to check and make sure things are working correctly. To access your Foreman/QuickStack VM:
-
-1. ``cd /tmp/bgs_vagrant``
+Now that the installer has finished it is a good idea to check and make sure things are working
+correctly. To access your Foreman/QuickStack VM:
-2. ``vagrant ssh`` (password is "vagrant")
+1. As root: ``cd /var/opt/opnfv/foreman_vm/``
-3. You are now in the VM and can check the status of Foreman service, etc. For example: ``systemctl status foreman``
+2. ``vagrant ssh`` (no password is required)
-4. Type "exit" and leave the Vagrant VM. Now execute: ``cat /tmp/bgs_vagrant/opnfv_ksgen_settings.yml | grep foreman_url``
+3. You are now in the VM and can check the status of Foreman service, etc. For example:
+ ``systemctl status foreman``
-5. This is your Foreman URL on your public interface. You can go to your web browser, ``http://<foreman_ip>``, login will be "admin"/"octopus". This way you can look around in Foreman and check that your hosts are in a good state, etc.
+4. Type "exit" and leave the Vagrant VM. Now execute:
+ ``cat /var/opt/opnfv/foreman_vm/opnfv_ksgen_settings.yml | grep foreman_url``
-6. In Foreman GUI, you can now go to Infrastructure -> Global Parameters. This is a list of all the variables being handed to Puppet for configuring OPNFV. Look for ``horizon_public_vip``. This is your IP address to Horizon GUI.
+5. This is your Foreman URL on your public interface. You can go to your web browser,
+ ``http://<foreman_ip>``, login will be "admin"/"octopus". This way you can look around in
+ Foreman and check that your hosts are in a good state, etc.
- **Note: You can find out more about how to ues Foreman by going to http://www.theforeman.org/ or by watching a walkthrough video here: https://bluejeans.com/s/89gb/**
+6. In Foreman GUI, you can now go to Infrastructure -> Global Parameters. This is a list of all the
+ variables being handed to Puppet for configuring OPNFV. Look for ``horizon_public_vip``. This is
+ your IP address to Horizon GUI.
-7. Now go to your web browser and insert the Horizon public VIP. The login will be "admin"/"octopus".
+**Note: You can find out more about how to use Foreman by going to http://www.theforeman.org/ or
+by watching a walkthrough video here: https://bluejeans.com/s/89gb/**
-8. You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
+7. Now go to your web browser and insert the Horizon public VIP. The login will be
+ "admin"/"octopus".
-.. _openstack_verify:
+8. You are now able to follow the `OpenStack Verification`_ section.
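+
+**Note: If you prefer a quick shell-level sanity check of the Foreman URL from step 5, something
+like the sketch below works from any host that can reach the public interface; an HTTP response
+code confirms the service answers. This is illustrative, not part of the installer:**
+
+  ::
+
+    # Expect an HTTP status code such as 200 or a redirect (301/302)
+    curl -s -o /dev/null -w "%{http_code}\n" http://<foreman_ip>/
+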
OpenStack Verification
----------------------
-Now that you have Horizon access, let's make sure OpenStack the OPNFV target system are working correctly:
+Now that you have Horizon access, let's make sure OpenStack and the OPNFV target system are
+working correctly:
1. In Horizon, click Project -> Compute -> Volumes, Create Volume
@@ -251,7 +352,8 @@ Now that you have Horizon access, let's make sure OpenStack the OPNFV target sys
3. Now in the left pane, click Compute -> Images, click Create Image
-4. Insert a name "cirros", Insert an Image Location ``http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img``
+4. Insert a name "cirros", Insert an Image Location
+ ``http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img``
5. Select format "QCOW2", select Public, then hit Create Image
@@ -263,75 +365,244 @@ Now that you have Horizon access, let's make sure OpenStack the OPNFV target sys
9. Enter ``10.0.0.5,10.0.0.9`` under Allocation Pools, then hit Create
-10. Now go to Project -> Compute -> Instances, click Launch Instance
+**Note: You may also want to expand this pool by giving a larger range, or you can simply hit
+Create without entering anything and the entire subnet range will be used for DHCP.**
+
+10. Go to Project -> Network -> Routers
+
+11. Click "provider_router". Then select "Add Interface"
+
+12. From the pop up menu, select test_subnet in the "Subnet" field. Press "Add interface"
+
+13. Verify your Network Topology looks correct in Project -> Network -> Network Topology
-11. Enter Instance Name "cirros1", select Instance Boot Source "Boot from image", and then select Image Name "cirros"
+14. Now go to Project -> Compute -> Instances, click Launch Instance
-12. Click Launch, status should show "Spawning" while it is being built
+15. Enter Instance Name "cirros1", select Instance Boot Source "Boot from image", and then select
+ Image Name "cirros"
-13. You can now repeat steps 11 and 12, but create a "cirros2" named instance
+16. Click Launch, status should show "Spawning" while it is being built
-14. Once both instances are up you can see their IP addresses on the Instances page. Click the Instance Name of cirros1.
+17. You can now repeat steps 15 and 16, but create a "cirros2" named instance
-15. Now click the "Console" tab and login as "cirros"/"cubswin" :)
+18. Once both instances are up you can see their IP addresses on the Instances page. Click the
+ Instance Name of cirros1.
-16. Verify you can ping the IP address of cirros2
+19. Now click the "Console" tab and login as "cirros"/"cubswin:)"
+
+20. Verify you can ping the IP address of cirros2
+
+21. Continue to the next steps to provide external network access to cirros1.
+
+22. Go to Project -> Compute -> Instances. From the drop down menu under "Actions" select
+ "Associate Floating IP"
+
+23. Press the "+" symbol next to the "IP Address" field. Select "Allocate IP" in the new pop up.
+
+24. You should now see an external IP address filled into the "IP Address" field. Click
+ "Associate".
+
+25. Now from your external network you should be able to ping/ssh to the floating IP address.
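+
+**Note: From a host on the external network, the check in step 25 amounts to the two commands
+below (the cirros credentials are the same as in step 19):**
+
+  ::
+
+    ping -c 3 <FLOATING_IP>
+    ssh cirros@<FLOATING_IP>    # password: cubswin:)
+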
Congratulations, you have successfully installed OPNFV!
+OpenStack CLI Verification
+--------------------------
+
+This section is for users who do not have web access or prefer to use the command line rather
+than a web browser to validate the OpenStack installation. Do not run this if you have already
+completed the OpenStack verification, since this uses the same names. (A consolidated shell
+sketch of these steps follows the numbered list.)
+
+1. Install the OpenStack CLI tools or log in to one of the compute or control servers.
+
+2. Find the IP of the keystone public VIP. As root:
+
+ cat /var/opt/opnfv/foreman_vm/opnfv_ksgen_settings.yml | \
+ grep keystone_public_vip
+
+3. Set the environment variables. Substitute the keystone public VIP for <VIP> below.
+
+ | export OS_AUTH_URL=http://<VIP>:5000/v2.0
+ | export OS_TENANT_NAME="admin"
+ | export OS_USERNAME="admin"
+ | export OS_PASSWORD="octopus"
+
+4. Load the CirrOS image into glance.
+
+ glance image-create --copy-from \
+ http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img \
+ --disk-format qcow2 --container-format bare --name 'CirrOS'
+
+5. Verify the image is downloaded. The status will be "active" when the download completes.
+
+ ``glance image-show CirrOS``
+
+6. Create a private tenant network.
+
+ ``neutron net-create test_network``
+
+7. Verify the network has been created by running the command below.
+
+ ``neutron net-show test_network``
+
+8. Create a subnet for the tenant network.
+
+ ``neutron subnet-create test_network --name test_subnet --dns-nameserver 8.8.8.8 10.0.0.0/24``
+
+9. Verify the subnet was created.
+
+ ``neutron subnet-show test_subnet``
+
+10. Add an interface from the test_subnet to the provider router.
+
+ ``neutron router-interface-add provider_router test_subnet``
+
+11. Verify the interface was added.
+
+ ``neutron router-port-list``
+
+12. Deploy a VM.
+
+ ``nova boot --flavor 1 --image CirrOS cirros1``
+
+13. Wait for the VM to complete booting. You can confirm this by viewing the console log until a
+    login prompt appears.
+
+ ``nova console-log cirros1``
+
+14. Get the local IP of the VM.
+
+ ``nova show cirros1 | grep test_network``
+
+15. Get the port ID for the IP from the previous command. Replace <IP> with the IP from the
+    previous command. The port id is the first series of numbers and letters.
+
+    ``neutron port-list | grep <IP> | awk ' { print $2 } '``
+
+16. Assign a floating IP to the VM. Substitute the port id from the previous command for
+    <PORT_ID>.
+
+ ``neutron floatingip-create --port-id <PORT_ID> provider_network``
+
+17. Log in to the VM. Substitute <FLOATING_IP> with the floating_ip_address displayed in the
+    output of the previous command.
+
+ ``ssh cirros@<FLOATING_IP>``
+
+18. Log out and create a second VM.
+
+ ``nova boot --flavor 1 --image CirrOS cirros2``
+
+19. Get the IP for cirros2.
+
+ ``nova show cirros2 | grep test_network``
+
+20. Redo step 17 to log back into cirros1 and ping cirros2. Replace <CIRROS2> with the IP from
+    the previous step.
+
+ ``ping <CIRROS2>``
+
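+**Note: For convenience, the flow above can be strung together as one shell sketch. It assumes
+the OpenStack CLI clients are installed and that you substitute your keystone public VIP for
+<VIP>; it also omits the wait/verify steps (glance image-show, nova console-log) that you should
+still perform between commands, and the field parsing at the end is illustrative:**
+
+  ::
+
+    #!/bin/bash
+    # Sketch of the CLI verification flow above; pause between steps as needed.
+    export OS_AUTH_URL=http://<VIP>:5000/v2.0
+    export OS_TENANT_NAME="admin"
+    export OS_USERNAME="admin"
+    export OS_PASSWORD="octopus"
+
+    # Image
+    glance image-create --copy-from \
+      http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img \
+      --disk-format qcow2 --container-format bare --name 'CirrOS'
+
+    # Tenant network, subnet and router interface
+    neutron net-create test_network
+    neutron subnet-create test_network --name test_subnet \
+      --dns-nameserver 8.8.8.8 10.0.0.0/24
+    neutron router-interface-add provider_router test_subnet
+
+    # Two instances
+    nova boot --flavor 1 --image CirrOS cirros1
+    nova boot --flavor 1 --image CirrOS cirros2
+
+    # Attach a floating IP to cirros1 once it has an address on test_network
+    IP=$(nova show cirros1 | awk '/test_network/ {print $5}')   # verify the field position
+    PORT_ID=$(neutron port-list | grep "$IP" | awk '{print $2}')
+    neutron floatingip-create --port-id "$PORT_ID" provider_network
+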
Installation Guide - VM Deployment
==================================
-This section goes step-by-step on how to correctly install and provision the OPNFV target system to VM nodes.
+This section goes step-by-step on how to correctly install and provision the OPNFV target system
+to VM nodes.
Install Jumphost
----------------
-Follow the instructions in the `Install Bare Metal Jumphost`_ section.
+Follow the instructions in the `Install Bare Metal Jumphost`_ section, except that you only need 1
+network interface on the host system with internet connectivity.
+
+Creating an Inventory File
+--------------------------
+
+It is optional to create an inventory file for virtual deployments. Since the nodes are virtual,
+you are welcome to use the provided opnfv_ksgen_settings files. You may also elect to customize
+your deployment. Those options include modifying the domain name of your deployment as well as
+allocating specific resources per node.
+
+Modifying VM resources is necessary for bigger virtual deployments in order to run more nova
+instances. To modify these resources you can edit each of the following node parameters in the
+inventory file (see the sketch after this list):
+
+1. memory - set in KiB
+
+2. cpus - number of vcpus to allocate to this VM
+
+3. disk - size in GB (cannot be less than 40)
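+
+**Note: As an illustration, the three parameters above might be set like this for one node; the
+exact nesting follows your opnfv_ksgen_settings file and the values are examples only:**
+
+  ::
+
+    compute:
+      memory: 4194304   # KiB (here, 4 GB)
+      cpus: 4           # vcpus allocated to this VM
+      disk: 50          # GB, cannot be less than 40
+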
Running ``deploy.sh``
----------------------------
+---------------------
-You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will run out of ``/tmp/bgs_vagrant``. Your compute and subsequent controller nodes will run in:
+You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/var/opt/opnfv/`` directory to store
+its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will run out of ``/var/opt/opnfv/foreman_vm/``.
+Your compute and subsequent controller nodes will run in:
-- ``/tmp/compute``
-- ``/tmp/controller1``
-- ``/tmp/controller2``
-- ``/tmp/controller3``
+- ``/var/opt/opnfv/compute``
+- ``/var/opt/opnfv/controller1``
+- ``/var/opt/opnfv/controller2``
+- ``/var/opt/opnfv/controller3``
-Each VM will be brought up and bridged to your Jumphost NICs. ``deploy.sh`` will first bring up your Foreman/QuickStack Vagrant VM and afterwards it will bring up each of the nodes listed above, in order.
+Each VM will be brought up and bridged to your Jumphost NIC for the public network. ``deploy.sh``
+will first bring up your Foreman/QuickStack Vagrant VM and afterwards it will bring up each of
+the nodes listed above, controllers first.
Follow the steps below to execute:
-1. ``cd /root/bgs_vagrant``
+1. ``cd /root/genesis/foreman/ci/``
+
+2. ``./deploy.sh -virtual -static_ip_range <your_range>``, where <your_range> is a range of at
+   least 20 IP addresses (for non-HA you need only 5) that are usable on your public subnet.
+   ``Ex: -static_ip_range 192.168.1.101,192.168.1.120``
-2. ``./deploy.sh -virtual``
+**Note: You may also wish to use other options, such as manually selecting the NIC to be used on
+your host. Please use "deploy.sh -h" to see a full list of available options.**
-3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost. You will also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:" this is Khalessi running and installing Foreman/QuickStack inside of your VM or deploying your nodes. When you see "Foreman is up!", that means deploy will now move on to bringing up your other nodes.
+3. It will take about 20-25 minutes to install the Foreman/QuickStack VM. If something goes wrong
+   during this part of the process, it is most likely a problem with the setup of your Jumphost.
+   You will also notice different outputs in your shell. Messages that say "TASK:" or "PLAY:" are
+   Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your nodes.
+   When you see "Foreman is up!", deploy will move on to bringing up your other nodes.
-4. ``deploy.sh`` will now bring up your other nodes, look for logging messages like "Starting Vagrant Node <node name>", "<node name> VM is up!" These are indicators of how far along in the process you are. ``deploy.sh`` will start each Vagrant VM, then run provisioning scripts to inform Foreman they are built and initiate Puppet.
+4. ``deploy.sh`` will now bring up your other nodes. Look for logging messages like "Starting
+   Vagrant Node <node name>" and "<node name> VM is up!" These are indicators of how far along
+   in the process you are. ``deploy.sh`` will start each Vagrant VM, then run provisioning
+   scripts to inform Foreman they are built and initiate Puppet.
-5. The speed at which nodes are provisioned is totally dependent on your Jumphost server specs. When complete you will see "All VMs are UP!"
+5. The speed at which nodes are provisioned is totally dependent on your Jumphost server specs. When
+ complete you will see "All VMs are UP!"
+
+6. The deploy will then print out the URL for your Foreman server as well as the URL to access
+   Horizon.
Verifying the Setup - VMs
-------------------------
-Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+Follow the instructions in the `Verifying the Setup`_ section.
-Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
+Also, for VM deployment you are able to easily access your nodes by going to
+``/var/opt/opnfv/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to
+go to a controller and check OpenStack services, OpenDaylight, etc.
OpenStack Verification - VMs
----------------------------
-Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+Follow the steps in the `OpenStack Verification`_ section.
Frequently Asked Questions
==========================
+Please see the `Arno FAQ <https://wiki.opnfv.org/releases/arno/faq>`_.
+
License
=======
-All Foreman/QuickStack and "common" entities are protected by the `Apache 2.0 License <http://www.apache.org/licenses/>`_.
+All Foreman/QuickStack and "common" entities are protected by the
+`Apache 2.0 License <http://www.apache.org/licenses/>`_.
References
==========
@@ -353,7 +624,15 @@ OpenStack
OpenDaylight
------------
-`OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
+Upstream OpenDaylight provides `a number of packaging and deployment options
+<https://wiki.opendaylight.org/view/Deployment>`_ meant for consumption by downstream projects like
+OPNFV.
+
+Currently, OPNFV Foreman uses `OpenDaylight's Puppet module
+<https://github.com/dfarrell07/puppet-opendaylight>`_, which in turn depends on `OpenDaylight's RPM
+<https://github.com/opendaylight/integration-packaging/tree/master/rpm>`_ hosted on the `CentOS
+Community Build System
+<http://cbs.centos.org/repos/nfv7-opendaylight-2-candidate/x86_64/os/Packages/>`_.
Foreman
-------
@@ -361,11 +640,10 @@ Foreman
`Foreman documentation <http://theforeman.org/documentation.html>`_
:Authors: Tim Rozet (trozet@redhat.com)
-:Version: 0.0.3
+:Version: 0.2.0
**Documentation tracking**
Revision: _sha1_
Build date: _date_
-
diff --git a/foreman/docs/src/release-notes.rst b/foreman/docs/src/release-notes.rst
index f9fcb37e5..613f56181 100644
--- a/foreman/docs/src/release-notes.rst
+++ b/foreman/docs/src/release-notes.rst
@@ -1,6 +1,6 @@
-===========================================================================================
-OPNFV Release Note for the Arno release of OPNFV when using Foreman as a deployment tool
-===========================================================================================
+=============================================================================================
+OPNFV Release Notes for the Arno SR1 release of OPNFV when using Foreman as a deployment tool
+=============================================================================================
.. contents:: Table of Contents
@@ -10,12 +10,14 @@ OPNFV Release Note for the Arno release of OPNFV when using Foreman as a deploy
Abstract
========
-This document provides the release notes for Arno release with the Foreman/QuickStack deployment toolchain.
+This document provides the release notes for the Arno SR1 release with the Foreman/QuickStack
+deployment toolchain.
License
=======
-All Foreman/QuickStack and "common" entities are protected by the Apache License ( http://www.apache.org/licenses/ )
+All Foreman/QuickStack and "common" entities are protected by the Apache License
+( http://www.apache.org/licenses/ )
Version history
@@ -34,18 +36,31 @@ Version history
| 2015-06-03 | 0.1.2 | Tim Rozet | Minor Edits |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-09-10 | 0.2.0 | Tim Rozet | Updated for SR1 |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-09-25 | 0.2.1 | Randy Levensalor | Added Workaround |
+| | | | for DHCP issue |
++--------------------+--------------------+--------------------+--------------------+
+
Important notes
===============
-This is the initial OPNFV Arno release that implements the deploy stage of the OPNFV CI pipeline.
+This is the OPNFV Arno SR1 release that implements the deploy stage of the OPNFV CI pipeline.
-Carefully follow the installation-instructions which guide a user on how to deploy OPNFV using Foreman/QuickStack installer.
+Carefully follow the installation instructions, which guide a user on how to deploy OPNFV using
+the Foreman/QuickStack installer.
Summary
=======
-Arno release with the Foreman/QuickStack deployment toolchain will establish an OPNFV target system on a Pharos compliant lab infrastructure. The current definition of an OPNFV target system is and OpenStack Juno version combined with OpenDaylight version: Helium. The system is deployed with OpenStack High Availability (HA) for most OpenStack services. OpenDaylight is deployed in non-HA form as HA is not availble for Arno release. Ceph storage is used as Cinder backend, and is the only supported storage for Arno. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller node.
+The Arno release with the Foreman/QuickStack deployment toolchain will establish an OPNFV target
+system on a Pharos compliant lab infrastructure. The current definition of an OPNFV target system
+is an OpenStack Juno version combined with OpenDaylight version Helium. The system is deployed
+with OpenStack High Availability (HA) for most OpenStack services. OpenDaylight is deployed in
+non-HA form as HA is not available for the Arno SR1 release. Ceph storage is used as the Cinder
+backend, and is the only supported storage for Arno. Ceph is set up as 3 OSDs and 3 Monitors, one
+OSD+Mon per Controller node.
- Documentation is built by Jenkins
- .iso image is built by Jenkins
@@ -58,16 +73,16 @@ Release Data
| **Project** | genesis |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | genesis/arno.2015.1.0 |
+| **Repo/tag** | genesis/arno.2015.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | arno.2015.1.0 |
+| **Release designation** | arno.2015.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2015-06-04 |
+| **Release date** | 2015-09-23 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Arno release |
+| **Purpose of the delivery** | OPNFV Arno SR1 release |
| | |
+--------------------------------------+--------------------------------------+
@@ -76,7 +91,8 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Arno release with the Foreman/QuickStack deployment toolchain. It is based on following upstream versions:
+This is the Service Release 1 version of the Arno release with the Foreman/QuickStack deployment
+toolchain. It is based on the following upstream versions:
- OpenStack (Juno release)
@@ -87,10 +103,11 @@ This is the first tracked version of the Arno release with the Foreman/QuickStac
Document version changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of Arno release with the Foreman/QuickStack deployment toolchain. The following documentation is provided with this release:
+This is the SR1 version of the Arno release with the Foreman/QuickStack deployment toolchain.
+The following documentation is provided with this release:
-- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0 (this document)
Feature additions
~~~~~~~~~~~~~~~~~
@@ -99,8 +116,27 @@ Feature additions
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: BGS-4 | OPNFV base system install |
-| | using Foreman/Quickstack. |
+| JIRA: BGS-73 | Changes Virtual deployments to |
+| | only require 1 interface, and adds |
+| | accessibility in China |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-75 | Adds ability to specify number of |
+| | floating IPs |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-3 | clean now removes all VMs |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-4 | Adds ability to specify NICs to |
+| | bridge to on the jumphost |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-86 | Adds ability to specify domain name |
+| | for deployment |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-1 | Adds ability to specify VM resources |
+| | such as disk size, memory, vcpus |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-33 | Adds ability to use single interface |
+| | for baremetal installs |
+--------------------------------------+--------------------------------------+
Bug corrections
@@ -112,9 +148,60 @@ Bug corrections
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
+| JIRA: BGS-65 | Fixes external network bridge and |
+| | increases neutron quota limits |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-74 | Fixes verification of vbox drivers |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-59 | Adds ODL Deployment stack docs to |
+| | Foreman Guide |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-60 | Migrates github bgs_vagrant project |
+| | into Genesis |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-89 | Fixes public allocation IP |
| | |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-71 | Adds check to ensure subnets are the |
+| | minimum size required |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-78 | Fixes Foreman clean to not hang and |
+| | now also removes libvirt |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-7 | Adds check to make sure 3 control |
+| | nodes are set when HA is enabled |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-68 | Adds check to make sure baremetal |
+| | nodes are powered off when deploying |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-14 | Fixes Vagrant base box to be opnfv |
| | |
+--------------------------------------+--------------------------------------+
+| JIRA: APEX-8 | Fixes puppet modules to come from |
+| | the Genesis repo |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-17 | Fixes clean to kill vagrant processes|
+| | correctly |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-2 | Removes default vagrant route from |
+| | virtual nodes |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-9 | Fixes external network to be created |
+| | by the services tenant |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-10 | Disables DHCP on external neutron |
+| | network |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-19 | Adds check to ensure provided arg |
+| | static_ip_range is correct |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-12 | Fixes horizon IP URL for non-HA |
+| | deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-84 | Set default route to public |
+| | gateway |
++--------------------------------------+--------------------------------------+
Deliverables
------------
@@ -122,12 +209,12 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
Foreman/QuickStack@OPNFV .iso file
-deploy.sh - Automatically deploys Target OPNFV System to Bare Metal
+deploy.sh - Automatically deploys Target OPNFV System to Bare Metal or VMs
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.2.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.2.0 (this document)
Known Limitations, Issues and Workarounds
=========================================
@@ -153,27 +240,39 @@ Known issues
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: BGS-13 | bridge br-ex is not auto configured |
-| | by puppet |
+| JIRA: APEX-13 | Keystone Config: bind host is wrong |
+| | for admin user |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-38 | Neutron fails to provide DHCP address|
+| | to instance |
+--------------------------------------+--------------------------------------+
Workarounds
-----------
**-**
+JIRA: APEX-38 - Neutron fails to provide DHCP address to instance
+
+1. Find the controller that is running the DHCP service. ssh to oscontroller[1-3] and
+   run the command below until it returns a namespace that starts with "qdhcp".
+
+ ``ip netns | grep qdhcp``
+
+2. Restart the neutron server and the neutron DHCP service.
+
+ ``systemctl restart neutron-server``
+
+ ``systemctl restart neutron-dhcp-agent``
+
+3. Restart the interface on the VM or restart the VM.
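+
+**Note: The search in step 1 can be scripted. The sketch below assumes the controllers are
+reachable as oscontroller1-3 and that root SSH access is configured:**
+
+  ::
+
+    for host in oscontroller1 oscontroller2 oscontroller3; do
+      # Print the host that owns a qdhcp namespace
+      ssh root@$host 'ip netns | grep -q qdhcp' && echo "DHCP service runs on $host"
+    done
+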
Test Result
===========
-The Arno release with the Foreman/QuickStack deployment toolchain has undergone QA test runs with the following results:
-
-+--------------------------------------+--------------------------------------+
-| **TEST-SUITE** | **Results:** |
-| | |
-+--------------------------------------+--------------------------------------+
-| **-** | **-** |
-+--------------------------------------+--------------------------------------+
+The Arno release with the Foreman/QuickStack deployment toolchain has undergone QA test runs with the
+following results:
+https://wiki.opnfv.org/arno_sr1_result_page?rev=1443626728
References
==========
diff --git a/fuel/TODO b/fuel/TODO
index 7aa42d22d..e2d1b379c 100644
--- a/fuel/TODO
+++ b/fuel/TODO
@@ -3,8 +3,3 @@
# jonas.bjurel@ericsson.com 0.2 2015.04.14
#########################################################################
Following items need to be done to achieve an OPNFV/BGS ARNO Fuel Stack:
-1) Add support for CentOS 6.5 - REMAINING
-2) Add Local GIT repo mirror
-3) Add Auto-deployment for Linux-Foundation Lab.
-4) Dry-run Funktest (Jenkins/Robot/etc.)
-5) Finalize Documentation \ No newline at end of file
diff --git a/fuel/build/Makefile b/fuel/build/Makefile
index 5f631200b..ba4beaf39 100644
--- a/fuel/build/Makefile
+++ b/fuel/build/Makefile
@@ -15,7 +15,7 @@ SHELL = /bin/bash
#Input args
export UNIT_TEST = FALSE
export INTERACTIVE = TRUE
-export ISOSRC = file:$(shell pwd)/fuel-6.0.1.iso
+export ISOSRC = file:$(shell pwd)/fuel-6.1.iso
export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC))
export PRODNO = "OPNFV_BGS"
export REVSTATE = "P0000"
@@ -37,13 +37,6 @@ export TOPDIR := $(shell pwd)
#Build subclasses
SUBDIRS := f_isoroot
-SUBDIRS += f_opnfv_puppet
-SUBDIRS += f_osnaily
-SUBDIRS += f_l23network
-SUBDIRS += f_resolvconf
-SUBDIRS += f_ntp
-SUBDIRS += f_odl_docker
-#SUBDIRS += f_odl
# f_example is only an example of how to generate a .deb package and
# should not be enabled in official builds.
@@ -63,7 +56,6 @@ all:
@echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE)
@echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
@echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
- $(MAKE) -C f_odl_docker -f Makefile all
@make -C docker
@docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso
@@ -83,16 +75,25 @@ $(ISOCACHE):
@echo "fuel" `git -C /tmp/fuel-main show | grep commit | head -1 | cut -d " " -f2` >> $(VERSION_FILE)
# Setup cgroups for docker-in-docker
sudo /root/enable_dockerx2
- # Patch to fix race condition when doing "Docker-in-Docker" build
- cd /tmp/fuel-main && patch -p1 < $(TOPDIR)/fuel-main_1.patch
- # Patch to make the sandbox chroot in Fuel succeed with package
- # installation in a Docker build
- cd /tmp/fuel-main && patch -p1 < $(TOPDIR)/fuel-main_2.patch
+	# Temporary patch to accommodate new Ubuntu trusty devops keys not yet
+	# backported to fuel 6.0 or 6.1
+ cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-main_3.patch
+ # Patch for adding dosfstools, as Fuel 6.1 is running mkfs.vfat
+ cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-main_5.patch
# Remove Docker optimizations, otherwise multistrap will fail during
# Fuel build.
sudo rm -f /etc/apt/apt.conf.d/docker*
#
cd /tmp/fuel-main && ./prepare-build-env.sh
+ cd /tmp/fuel-main && make repos
+ # Patch for speeding up image creation in virtual environments,
+ # https://review.openstack.org/#/c/197943/
+ cd /tmp/fuel-main && patch -p0 < $(TOPDIR)/fuel-agent_1.patch
+	cd /tmp/fuel-main/build/repos/nailgun && git config --global user.email "build@opnfv.org"
+ cd /tmp/fuel-main/build/repos/nailgun && git config --global user.name "OPNFV build"
+ cd /tmp/fuel-main/build/repos/nailgun && git add -u .
+ cd /tmp/fuel-main/build/repos/nailgun && git commit -m "Added patch"
+ #
cd /tmp/fuel-main && make iso
mv /tmp/fuel-main/build/artifacts/fuel*.iso .
@@ -116,14 +117,6 @@ $(SUBDIRS):
patch-packages:
ORIGISO=$(ISOCACHE) REVSTATE=$(REVSTATE) $(MAKE) -C $@ -f Makefile release
-.PHONY: prepare
-prepare:
- #$(MAKE) -C opendaylight -f Makefile setup
-
-.PHONY: odl
-odl:
- #$(MAKE) -C opendaylight -f Makefile
-
.PHONY: build-clean $(SUBCLEAN)
build-clean: $(SUBCLEAN)
$(MAKE) -C patch-packages -f Makefile clean
@@ -132,13 +125,11 @@ build-clean: $(SUBCLEAN)
@rm -f $(NEWISO)
.PHONY: clean $(SUBCLEAN)
-clean: clean-cache prepare $(SUBCLEAN)
+clean: clean-cache $(SUBCLEAN)
$(MAKE) -C patch-packages -f Makefile clean
- #$(MAKE) -C opendaylight -f Makefile clean
@rm -f *.iso
@rm -Rf release
@rm -Rf newiso
- @rm -f f_odl
@rm -f $(NEWISO)
@rm -f $(BUILD_BASE)/.versions
@@ -148,6 +139,12 @@ $(SUBCLEAN): %.clean:
# Todo: Make things smarter - we shouldn't need to clean everything
# between make invocations.
.PHONY: iso
-iso: prepare build-clean odl $(ISOCACHE) $(SUBDIRS) patch-packages
+iso: build-clean $(ISOCACHE) $(SUBDIRS) patch-packages
install/install.sh iso $(ISOCACHE) $(NEWISO) $(PRODNO) $(REVSTATE)
@printf "\n\nProduct ISO is $(NEWISO)\n\n"
+
+# Start a bash shell in docker for Makefile debugging
+.PHONY: debug
+debug:
+ @docker version >/dev/null 2>&1 || (echo 'No Docker installation available'; exit 1)
+ docker/runcontext $(DOCKERIMG) bash
diff --git a/fuel/build/README b/fuel/build/README
index 9692f3468..a6e15694c 100644
--- a/fuel/build/README
+++ b/fuel/build/README
@@ -18,6 +18,5 @@ This purpose of this framework is to:
- Apply patches to the baseline.
- Etc.
3) Refactor/rebuild the .iso image for deployment (also builds in a container, for the same reason as mentioned above)
-4) Through a pre-deployment script, setting config's not part of the pristine fuel build can be achieved
For detailed instructions on how to add content, configuration, build and deploy - please see: DOC/
diff --git a/fuel/build/cache.mk b/fuel/build/cache.mk
index cc98f6825..b88ac2fc4 100644
--- a/fuel/build/cache.mk
+++ b/fuel/build/cache.mk
@@ -1,4 +1,4 @@
-##############################################################################
+#############################################################################
# Copyright (c) 2015 Ericsson AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
@@ -15,10 +15,8 @@ CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
############################################################################
# BEGIN of variables to customize
#
-#CACHEDIRS := opendaylight/f_odl/package
+#CACHEDIRS := foo/bar
-#CACHEFILES := opendaylight/.odl-build-history
-#CACHEFILES += opendaylight/.odl-build.log
CACHEFILES += .versions
CACHEFILES += $(shell basename $(ISOSRC))
#
@@ -54,20 +52,20 @@ $(CACHEFILES):
@if [ ! -f $(BUILD_BASE)/$@ ]; then\
echo " " > $(BUILD_BASE)/$@;\
- ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
+ ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
rm -f $(BUILD_BASE)/$@;\
else\
ln -s $(BUILD_BASE)/$@ $(CACHE_DIR)/$@;\
fi
.PHONY: validate-cache
-validate-cache: prepare $(CACHEVALIDATE)
- @if [[ $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}') ]]; then\
+validate-cache: $(CACHEVALIDATE)
+ @if [ "$(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ")" != "$(shell cat $(VERSION_FILE) | grep config.mk | awk '{print $$NF}')" ]; then\
echo "Cache does not match current config.mk definition, cache must be rebuilt";\
exit 1;\
fi;
- @if [[ $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") != $(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}') ]]; then\
+ @if [ "$(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ")" != "$(shell cat $(VERSION_FILE) | grep cache.mk | awk '{print $$NF}')" ]; then\
echo "Cache does not match current cache.mk definition, cache must be rebuilt";\
exit 1;\
fi;
@@ -80,14 +78,12 @@ validate-cache: prepare $(CACHEVALIDATE)
then \
REMOTE_ID=$(shell git ls-remote $(FUEL_MAIN_REPO) $(FUEL_MAIN_TAG) | awk '{print $$(NF-1)}'); \
fi; \
- if [ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep fuel | awk '{print $$NF}') ]; \
+ if [[ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep fuel | awk '{print $$NF}') ]]; \
then \
echo "Cache does not match upstream Fuel, cache must be rebuilt!"; \
exit 1; \
fi
- #$(MAKE) -C opendaylight validate-cache
-
.PHONY: $(CACHEVALIDATE)
$(CACHEVALIDATE): %.validate:
@echo VALIDATE $(CACHEVALIDATE)
diff --git a/fuel/build/config.mk b/fuel/build/config.mk
index 19f502d03..e9a5320f4 100644
--- a/fuel/build/config.mk
+++ b/fuel/build/config.mk
@@ -8,19 +8,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-ODL_MAIN_REPO := https://git.opendaylight.org/gerrit/p/controller.git
-ODL_MAIN_TAG := release/helium
-
FUEL_MAIN_REPO := https://github.com/stackforge/fuel-main
-FUEL_MAIN_TAG = stable/6.0
+FUEL_MAIN_TAG = stable/6.1
DOCKER_REPO := http://get.docker.com/builds/Linux/x86_64
DOCKER_TAG := docker-latest
-.PHONY: get-odl-repo
-get-odl-repo:
- @echo $(ODL_MAIN_REPO) $(ODL_MAIN_TAG)
-
.PHONY: get-fuel-repo
get-fuel-repo:
@echo $(FUEL_MAIN_REPO) $(FUEL_MAIN_TAG)
diff --git a/fuel/build/docker/ubuntu-builder/Dockerfile b/fuel/build/docker/ubuntu-builder/Dockerfile
index b4e1b4e24..81cdc43fb 100644
--- a/fuel/build/docker/ubuntu-builder/Dockerfile
+++ b/fuel/build/docker/ubuntu-builder/Dockerfile
@@ -14,7 +14,7 @@ RUN apt-get update
RUN apt-get install -y software-properties-common python-software-properties \
make python-setuptools python-all dpkg-dev debhelper \
fuseiso git genisoimage bind9-host wget curl lintian tmux lxc iptables \
- ca-certificates sudo apt-utils lsb-release
+ ca-certificates sudo apt-utils lsb-release dosfstools
RUN echo "ALL ALL=NOPASSWD: ALL" > /etc/sudoers.d/open-sudo
RUN chmod 0440 /etc/sudoers.d/open-sudo
@@ -24,8 +24,5 @@ ADD ./setcontext /root/setcontext
RUN chmod +x /root/setcontext
ADD ./enable_dockerx2 /root/enable_dockerx2
RUN chmod +x /root/enable_dockerx2
-ADD ./install.sh /root/install.sh
-RUN chmod +x /root/install.sh
-RUN /root/install.sh
VOLUME /var/lib/docker
diff --git a/fuel/build/docker/ubuntu-builder/install.sh b/fuel/build/docker/ubuntu-builder/install.sh
deleted file mode 100755
index df1af72f1..000000000
--- a/fuel/build/docker/ubuntu-builder/install.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-#Install Oracle Java 7 jdk
-echo "Installing JAVA 7"
-apt-get update
-add-apt-repository ppa:webupd8team/java -y
-apt-get update
-echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections
-apt-get install oracle-java7-installer -y
-
-#Install Maven 3
-echo deb http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
-echo deb-src http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
-apt-get update || exit 1
-sudo apt-get install -y --force-yes maven3 || exit 1
-ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn
diff --git a/fuel/build/f_isoroot/Makefile b/fuel/build/f_isoroot/Makefile
index bde8e6442..a9b12d927 100644
--- a/fuel/build/f_isoroot/Makefile
+++ b/fuel/build/f_isoroot/Makefile
@@ -8,7 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-SUBDIRS = f_predeployment f_kscfg f_bootstrap
+SUBDIRS = f_kscfg f_bootstrap f_repobuild f_odlpluginbuild
SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
.PHONY: all
diff --git a/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh b/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh
index 348ce3cb4..8bdf5667c 100755
--- a/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh
+++ b/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+FUEL_RELEASE=$(grep release: /etc/fuel/version.yaml | cut -d: -f2 | tr -d '" ')
function countdown() {
local i
@@ -47,9 +48,6 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
{ kill "$pid"; wait $!; } 2>/dev/null
case "$key" in
$'\e') echo "Skipping Fuel Setup.."
- echo -n "Applying default Fuel setings..."
- fuelmenu --save-only --iface=eth0
- echo "Done!"
;;
*) echo -e "\nEntering Fuel Setup..."
fuelmenu
@@ -57,30 +55,51 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
esac
fi
fi
+
+
#Reread /etc/sysconfig/network to inform puppet of changes
. /etc/sysconfig/network
hostname "$HOSTNAME"
-### docker stuff
-images_dir="/var/www/nailgun/docker/images"
+service docker start
-# extract docker images
-mkdir -p $images_dir $sources_dir
-rm -f $images_dir/*tar
-pushd $images_dir &>/dev/null
+if [ -f /root/.build_images ]; then
+ #Fail on all errors
+ set -e
+ trap fail EXIT
-echo "Extracting and loading docker images. (This may take a while)"
-lrzip -d -o fuel-images.tar fuel-images.tar.lrz && tar -xf fuel-images.tar && rm -f fuel-images.tar
-popd &>/dev/null
-service docker start
+ echo "Loading Fuel base image for Docker..."
+ docker load -i /var/www/nailgun/docker/images/fuel-images.tar
-# load docker images
-for image in $images_dir/*tar ; do
- echo "Loading docker image ${image}..."
- docker load -i "$image"
- # clean up extracted image
- rm -f "$image"
-done
+ echo "Building Fuel Docker images..."
+ WORKDIR=$(mktemp -d /tmp/docker-buildXXX)
+ SOURCE=/var/www/nailgun/docker
+ REPO_CONT_ID=$(docker -D run -d -p 80 -v /var/www/nailgun:/var/www/nailgun fuel/centos sh -c 'mkdir /var/www/html/os;ln -sf /var/www/nailgun/centos/x86_64 /var/www/html/os/x86_64;/usr/sbin/apachectl -DFOREGROUND')
+ RANDOM_PORT=$(docker port $REPO_CONT_ID 80 | cut -d':' -f2)
+
+ for imagesource in /var/www/nailgun/docker/sources/*; do
+ if ! [ -f "$imagesource/Dockerfile" ]; then
+ echo "Skipping ${imagesource}..."
+ continue
+ fi
+ image=$(basename "$imagesource")
+ cp -R "$imagesource" $WORKDIR/$image
+ mkdir -p $WORKDIR/$image/etc
+ cp -R /etc/puppet /etc/fuel $WORKDIR/$image/etc
+ sed -e "s/_PORT_/${RANDOM_PORT}/" -i $WORKDIR/$image/Dockerfile
+ sed -e 's/production:.*/production: "docker-build"/' -i $WORKDIR/$image/etc/fuel/version.yaml
+ docker build -t fuel/${image}_${FUEL_RELEASE} $WORKDIR/$image
+ done
+ docker rm -f $REPO_CONT_ID
+ rm -rf "$WORKDIR"
+
+ #Remove trap for normal deployment
+ trap - EXIT
+ set +e
+else
+ echo "Loading docker images. (This may take a while)"
+ docker load -i /var/www/nailgun/docker/images/fuel-images.tar
+fi
# apply puppet
puppet apply --detailed-exitcodes -d -v /etc/puppet/modules/nailgun/examples/host-only.pp
@@ -102,4 +121,52 @@ done
shopt -u nullglob
### OPNFV addition END
+# Enable updates repository
+cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-updates.repo << EOF
+[mos${FUEL_RELEASE}-updates]
+name=mos${FUEL_RELEASE}-updates
+baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/updates/
+gpgcheck=0
+skip_if_unavailable=1
+EOF
+
+# Enable security repository
+cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-security.repo << EOF
+[mos${FUEL_RELEASE}-security]
+name=mos${FUEL_RELEASE}-security
+baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/security/
+gpgcheck=0
+skip_if_unavailable=1
+EOF
+
+#Check if repo is accessible
+echo "Checking for access to updates repository..."
+repourl=$(grep baseurl /etc/yum.repos.d/*updates* 2>/dev/null | cut -d'=' -f2- | head -1)
+if urlaccesscheck check "$repourl" ; then
+ UPDATE_ISSUES=0
+else
+ UPDATE_ISSUES=1
+fi
+
+if [ $UPDATE_ISSUES -eq 1 ]; then
+ warning="WARNING: There are issues connecting to Fuel update repository.\
+\nPlease fix your connection and update this node with \`yum update\`\
+\nThen run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
+\nto repeat bootstrap on Fuel Master with the latest updates.\
+\nFor more information, check out Fuel documentation at:\
+\nhttp://docs.mirantis.com/fuel"
+else
+ warning="WARNING: There may be updates available for Fuel.\
+\nYou should update this node with \`yum update\`. If there are available\
+\n updates, run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
+\nto repeat bootstrap on Fuel Master with the latest updates.\
+\nFor more information, check out Fuel documentation at:\
+\nhttp://docs.mirantis.com/fuel"
+fi
+echo
+echo "*************************************************"
+echo -e "$warning"
+echo "*************************************************"
+echo "Sending notification to Fuel UI..."
+fuel notify --topic warning --send "$warning"
echo "Fuel node deployment complete!"
diff --git a/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig b/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig
index 7b6e6bd71..8d21c1e72 100755
--- a/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig
+++ b/fuel/build/f_isoroot/f_bootstrap/bootstrap_admin_node.sh.orig
@@ -1,4 +1,5 @@
#!/bin/bash
+FUEL_RELEASE=$(grep release: /etc/fuel/version.yaml | cut -d: -f2 | tr -d '" ')
function countdown() {
local i
@@ -37,9 +38,6 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
{ kill "$pid"; wait $!; } 2>/dev/null
case "$key" in
$'\e') echo "Skipping Fuel Setup.."
- echo -n "Applying default Fuel setings..."
- fuelmenu --save-only --iface=eth0
- echo "Done!"
;;
*) echo -e "\nEntering Fuel Setup..."
fuelmenu
@@ -47,30 +45,51 @@ if [[ "$showmenu" == "yes" || "$showmenu" == "YES" ]]; then
esac
fi
fi
+
+
#Reread /etc/sysconfig/network to inform puppet of changes
. /etc/sysconfig/network
hostname "$HOSTNAME"
-### docker stuff
-images_dir="/var/www/nailgun/docker/images"
+service docker start
-# extract docker images
-mkdir -p $images_dir $sources_dir
-rm -f $images_dir/*tar
-pushd $images_dir &>/dev/null
+if [ -f /root/.build_images ]; then
+ #Fail on all errors
+ set -e
+ trap fail EXIT
-echo "Extracting and loading docker images. (This may take a while)"
-lrzip -d -o fuel-images.tar fuel-images.tar.lrz && tar -xf fuel-images.tar && rm -f fuel-images.tar
-popd &>/dev/null
-service docker start
+ echo "Loading Fuel base image for Docker..."
+ docker load -i /var/www/nailgun/docker/images/fuel-images.tar
+
+ echo "Building Fuel Docker images..."
+ WORKDIR=$(mktemp -d /tmp/docker-buildXXX)
+ SOURCE=/var/www/nailgun/docker
+ REPO_CONT_ID=$(docker -D run -d -p 80 -v /var/www/nailgun:/var/www/nailgun fuel/centos sh -c 'mkdir /var/www/html/os;ln -sf /var/www/nailgun/centos/x86_64 /var/www/html/os/x86_64;/usr/sbin/apachectl -DFOREGROUND')
+ RANDOM_PORT=$(docker port $REPO_CONT_ID 80 | cut -d':' -f2)
+
+ for imagesource in /var/www/nailgun/docker/sources/*; do
+ if ! [ -f "$imagesource/Dockerfile" ]; then
+ echo "Skipping ${imagesource}..."
+ continue
+ fi
+ image=$(basename "$imagesource")
+ cp -R "$imagesource" $WORKDIR/$image
+ mkdir -p $WORKDIR/$image/etc
+ cp -R /etc/puppet /etc/fuel $WORKDIR/$image/etc
+ sed -e "s/_PORT_/${RANDOM_PORT}/" -i $WORKDIR/$image/Dockerfile
+ sed -e 's/production:.*/production: "docker-build"/' -i $WORKDIR/$image/etc/fuel/version.yaml
+ docker build -t fuel/${image}_${FUEL_RELEASE} $WORKDIR/$image
+ done
+ docker rm -f $REPO_CONT_ID
+ rm -rf "$WORKDIR"
-# load docker images
-for image in $images_dir/*tar ; do
- echo "Loading docker image ${image}..."
- docker load -i "$image"
- # clean up extracted image
- rm -f "$image"
-done
+ #Remove trap for normal deployment
+ trap - EXIT
+ set +e
+else
+ echo "Loading docker images. (This may take a while)"
+ docker load -i /var/www/nailgun/docker/images/fuel-images.tar
+fi
# apply puppet
puppet apply --detailed-exitcodes -d -v /etc/puppet/modules/nailgun/examples/host-only.pp
@@ -81,4 +100,53 @@ rmdir /var/log/remote && ln -s /var/log/docker-logs/remote /var/log/remote
dockerctl check || fail
bash /etc/rc.local
+
+# Enable updates repository
+cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-updates.repo << EOF
+[mos${FUEL_RELEASE}-updates]
+name=mos${FUEL_RELEASE}-updates
+baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/updates/
+gpgcheck=0
+skip_if_unavailable=1
+EOF
+
+# Enable security repository
+cat > /etc/yum.repos.d/mos${FUEL_RELEASE}-security.repo << EOF
+[mos${FUEL_RELEASE}-security]
+name=mos${FUEL_RELEASE}-security
+baseurl=http://mirror.fuel-infra.org/mos/centos-6/mos${FUEL_RELEASE}/security/
+gpgcheck=0
+skip_if_unavailable=1
+EOF
+
+#Check if repo is accessible
+echo "Checking for access to updates repository..."
+repourl=$(grep baseurl /etc/yum.repos.d/*updates* 2>/dev/null | cut -d'=' -f2- | head -1)
+if urlaccesscheck check "$repourl" ; then
+ UPDATE_ISSUES=0
+else
+ UPDATE_ISSUES=1
+fi
+
+if [ $UPDATE_ISSUES -eq 1 ]; then
+ warning="WARNING: There are issues connecting to Fuel update repository.\
+\nPlease fix your connection and update this node with \`yum update\`\
+\nThen run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
+\nto repeat bootstrap on Fuel Master with the latest updates.\
+\nFor more information, check out Fuel documentation at:\
+\nhttp://docs.mirantis.com/fuel"
+else
+ warning="WARNING: There may be updates available for Fuel.\
+\nYou should update this node with \`yum update\`. If there are available\
+\n updates, run \`dockerctl destroy all; bootstrap_admin_node.sh;\`\
+\nto repeat bootstrap on Fuel Master with the latest updates.\
+\nFor more information, check out Fuel documentation at:\
+\nhttp://docs.mirantis.com/fuel"
+fi
+echo
+echo "*************************************************"
+echo -e "$warning"
+echo "*************************************************"
+echo "Sending notification to Fuel UI..."
+fuel notify --topic warning --send "$warning"
echo "Fuel node deployment complete!"
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh
deleted file mode 100755
index 79aa31a4a..000000000
--- a/fuel/build/f_isoroot/f_bootstrap/post-scripts/01_fix_iommubug.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#/bin/sh
-echo "Setting intel_iommu=off in bootstrap profile - a fix for the Dell systems"
-echo "Old settings"
-dockerctl shell cobbler cobbler profile report --name bootstrap
-echo "Modifying"
-dockerctl shell cobbler cobbler profile edit --name bootstrap --kopts "intel_iommu=off" --in-place
-echo "New settings"
-dockerctl shell cobbler cobbler profile report --name bootstrap
-
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh
deleted file mode 100755
index bf7591bdc..000000000
--- a/fuel/build/f_isoroot/f_bootstrap/post-scripts/02_fix_console_speed.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#/bin/sh
-echo "Changing console speed to 115200 (std is 9600) on bootstrap"
-echo "Old settings"
-dockerctl shell cobbler cobbler profile report --name bootstrap
-echo "Modifying"
-dockerctl shell cobbler cobbler profile edit --name bootstrap --kopts "console=tty0 console=ttyS0,115200" --in-place
-echo "New settings"
-dockerctl shell cobbler cobbler profile report --name bootstrap
-echo "Setting console speed to 115200 on ubuntu_1204_x86_64 (std is no serial console)"
-echo "Old settings"
-dockerctl shell cobbler cobbler profile report --name ubuntu_1204_x86_64
-echo "Modifying"
-dockerctl shell cobbler cobbler profile edit --name ubuntu_1204_x86_64 --kopts "console=tty0 console=ttyS0,115200" --in-place
-echo "New settings"
-dockerctl shell cobbler cobbler profile report --name ubuntu_1204_x86_64
diff --git a/fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh b/fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh
new file mode 100755
index 000000000..427a55add
--- /dev/null
+++ b/fuel/build/f_isoroot/f_bootstrap/post-scripts/03_install_repo.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+echo "Installing pre-build repo"
+if [ ! -d /opt/opnfv/nailgun ]; then
+ echo "Error - found no repo!"
+ exit 1
+fi
+
+mkdir -p /var/www/nailgun
+mv /opt/opnfv/nailgun/* /var/www/nailgun
+if [ $? -ne 0 ]; then
+ echo "Error moving repos to their correct location!"
+ exit 1
+fi
+rmdir /opt/opnfv/nailgun
+if [ $? -ne 0 ]; then
+ echo "Error removing /opt/opnfv/nailgun directory!"
+ exit 1
+fi
+echo "Done installing pre-build repo"
diff --git a/fuel/build/f_isoroot/f_kscfg/ks.cfg b/fuel/build/f_isoroot/f_kscfg/ks.cfg
index 508f04436..12cd1abab 100755..100644
--- a/fuel/build/f_isoroot/f_kscfg/ks.cfg
+++ b/fuel/build/f_isoroot/f_kscfg/ks.cfg
@@ -26,10 +26,12 @@ skipx
drives=""
removable_drives=""
for drv in `ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`; do
- if (grep -q 0 /sys/block/${drv}/removable); then
- drives="${drives} ${drv}"
- else
- removable_drives="${removable_drives} ${drv}"
+ if !(blkid | grep -q "${drv}.*Fuel"); then
+ if (grep -q 0 /sys/block/${drv}/removable); then
+ drives="${drives} ${drv}"
+ else
+ removable_drives="${removable_drives} ${drv}"
+ fi
fi
done
default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
@@ -152,9 +154,9 @@ if [ "$format_confirmed" != "yes" ] ; then
chvt 1
fi
-# verify tgtdrive is at least 30GB
+# verify tgtdrive is at least 41GB
tgtdrivesize=$(( $(cat "/sys/class/block/${tgtdrive}/size") / 2 / 1024 ))
-if [ $tgtdrivesize -lt 30720 ]; then
+if [ $tgtdrivesize -lt 41984 ]; then
exec < /dev/tty3 > /dev/tty3 2>&1
chvt 3
clear
@@ -162,7 +164,7 @@ if [ $tgtdrivesize -lt 30720 ]; then
echo '********************************************************************'
echo '* E R R O R *'
echo '* *'
- echo '* Your disk is under 30GB in size. Installation cannot continue. *'
+ echo '* Your disk is under 41GB in size. Installation cannot continue. *'
echo '* Restart installation with a larger disk. *'
echo '* *'
echo '********************************************************************'
@@ -175,7 +177,9 @@ fi
tgtdrive=$(echo $tgtdrive | sed -e 's/!/\//')
# source
-if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
+ echo "harddrive --partition=LABEL="OpenStack_Fuel" --dir=/" > /tmp/source.ks
+elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
echo "harddrive --partition=UUID=will_be_substituted_with_actual_uuid --dir=/" > /tmp/source.ks
else
echo "cdrom" > /tmp/source.ks
@@ -204,16 +208,18 @@ else
fi
echo > /tmp/partition.ks
echo "partition /boot --onpart=/dev/${bootdev}3" >> /tmp/partition.ks
-echo "partition pv.001 --ondisk=${tgtdrive} --size=30000 --grow" >> /tmp/partition.ks
+echo "partition /boot/efi --onpart=/dev/${bootdev}2" >> /tmp/partition.ks
+echo "partition pv.001 --ondisk=${tgtdrive} --size=41000 --grow" >> /tmp/partition.ks
echo "volgroup os pv.001" >> /tmp/partition.ks
echo "logvol swap --vgname=os --recommended --name=swap" >> /tmp/partition.ks
echo "logvol / --vgname=os --size=10000 --name=root --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var --vgname=os --size=10000 --percent 60 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/log --vgname=os --size=4096 --percent 40 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var --vgname=os --size=10000 --percent 30 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/lib/docker --vgname=os --size=17000 --percent 20 --grow --name=varlibdocker --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/log --vgname=os --size=4096 --percent 50 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
# bootloader
-echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
+echo "bootloader --location=partition --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
# Anaconda can not install grub 0.97 on disks which are >4T.
# The reason is that grub does not support such large geometries
@@ -233,6 +239,9 @@ echo "cat /tmp/grub.script | chroot /mnt/sysimage /sbin/grub --no-floppy --batch
%packages --nobase --excludedocs
@Core
+fuel
+fuel-library
+fuel-dockerctl
authconfig
bind-utils
cronie
@@ -241,7 +250,12 @@ curl
daemonize
dhcp
docker-io
+fuel-bootstrap-image
+fuel-createmirror
+fuel-target-centos-images
+fuel-package-updates
fuelmenu
+fuel-docker-images
gdisk
lrzip
lsof
@@ -249,8 +263,10 @@ man
mlocate
nmap-ncat
ntp
+ntpdate
openssh-clients
policycoreutils
+python-daemon
rsync
ruby21-puppet
ruby21-rubygem-netaddr
@@ -266,6 +282,7 @@ vim-enhanced
virt-what
wget
yum
+yum-plugin-priorities
%include /tmp/post_partition.ks
@@ -285,6 +302,8 @@ echo -e "* soft core unlimited\n* hard core unlimited" >> /etc/security/limits.c
%post --nochroot --log=/mnt/sysimage/root/anaconda-post-before-chroot.log
#!/bin/sh
+set -x
+
SOURCE="/mnt/sysimage/tmp/source"
for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
@@ -342,6 +361,7 @@ function save_cfg {
else
echo GATEWAY=$gw >> /etc/sysconfig/network
fi
+ [ -n "$build_images" -a "$build_images" != "0" ] && echo -e "$build_images" > /root/.build_images
}
# Default FQDN
@@ -356,6 +376,7 @@ gw=$gw
device="eth0"
hwaddr=`ifconfig $device | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
dhcp_interface=$dhcp_interface
+build_images=$build_images
save_cfg
# Mounting installation source
@@ -366,7 +387,9 @@ echo
mkdir -p ${SOURCE}
mkdir -p ${FS}
-if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
+ mount /dev/disk/by-label/"OpenStack_Fuel" ${SOURCE}
+elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
mount /dev/disk/by-uuid/will_be_substituted_with_actual_uuid ${FS}
mount -o loop ${FS}/nailgun.iso ${SOURCE}
fi
@@ -390,61 +413,47 @@ cp ${SOURCE}/.treeinfo ${repodir}/centos/x86_64
# Copying Ubuntu files
mkdir -p ${repodir}/ubuntu/x86_64/images
-cp -r ${SOURCE}/ubuntu/conf ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/db ${repodir}/ubuntu/x86_64
cp -r ${SOURCE}/ubuntu/dists ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/indices ${repodir}/ubuntu/x86_64
cp -r ${SOURCE}/ubuntu/pool ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux ${repodir}/ubuntu/x86_64/images
-cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz ${repodir}/ubuntu/x86_64/images
+
+# We do not ship debian-installer kernel and initrd on ISO.
+# But we still need to be able to create ubuntu cobbler distro
+# which requires kernel and initrd to be available. So, we
+# just touch these files to work around cobbler's limitation.
+touch ${repodir}/ubuntu/x86_64/images/linux
+touch ${repodir}/ubuntu/x86_64/images/initrd.gz
# make links for backward compatibility
ln -s ${repodir}/centos ${wwwdir}/centos
ln -s ${repodir}/ubuntu ${wwwdir}/ubuntu
-# Copying bootstrap image
-mkdir -p ${wwwdir}/bootstrap
-cp -r ${SOURCE}/bootstrap/initramfs.img ${wwwdir}/bootstrap
-cp -r ${SOURCE}/bootstrap/linux ${wwwdir}/bootstrap
-
-# Copying target images
-cp -r ${SOURCE}/targetimages ${wwwdir}
-
-mkdir -p /root/.ssh
-chmod 700 /root/.ssh
-cp ${SOURCE}/bootstrap/bootstrap.rsa /root/.ssh
-chmod 600 /root/.ssh/bootstrap.rsa
-
# --------------------------
# UNPACKING PUPPET MANIFESTS
# --------------------------
# create folders
-mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
-mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
-rm -rf /etc/puppet/modules/
+#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
+#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
+#rm -rf /etc/puppet/modules/
# TODO(ikalnitsky): investigate why we need this
-cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
+#cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
# place modules and manifests
-tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
-cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
+#tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
+#cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
cp ${SOURCE}/centos-versions.yaml ${SOURCE}/ubuntu-versions.yaml /etc/puppet/${OPENSTACK_VERSION}/manifests/
# make links for backward compatibility
-pushd /etc/puppet
-ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
-ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
-popd
+#pushd /etc/puppet
+#ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
+#ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
+#popd
cp ${SOURCE}/send2syslog.py /bin/send2syslog.py
mkdir -p /var/lib/hiera
touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
-# Deploy docker images and ctl tools if we built ISO with docker containers support
-[ -d "${SOURCE}/docker" ] && cp -r ${SOURCE}/docker ${wwwdir}/docker
-
# Prepare local repository specification
rm /etc/yum.repos.d/CentOS*.repo
cat > /etc/yum.repos.d/nailgun.repo << EOF
@@ -461,6 +470,24 @@ sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf
# Disable GSSAPI in ssh server config
sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
+# Enable MOTD banner in sshd
+sed -i -e "s/^\s*PrintMotd no/PrintMotd yes/g" /etc/ssh/sshd_config
+
+# Add note regarding local repos creation to MOTD
+cat >> /etc/motd << EOF
+
+All environments use online repositories by default.
+Use the following commands to create local repositories
+on master node and change default repository settings:
+
+* CentOS: fuel-package-updates (see --help for options)
+* Ubuntu: fuel-createmirror (see --help for options)
+
+Please refer to the following guide for more information:
+https://docs.mirantis.com/openstack/fuel/fuel-6.1/reference-architecture.html#fuel-rep-mirror
+
+EOF
+
# Copying bootstrap_admin_node.sh, chmod it and
# adding /etc/init/bootstrap_admin_node.conf
cp ${SOURCE}/bootstrap_admin_node.sh /usr/local/sbin/bootstrap_admin_node.sh
@@ -546,10 +573,7 @@ rm -rf ${SOURCE}
umount -f ${FS} || true
rm -rf ${FS}
-# Enabling/configuring NTPD and ntpdate services
-echo "server 127.127.1.0" >> /etc/ntp.conf
-echo "fudge 127.127.1.0 stratum 10" >> /etc/ntp.conf
-echo "tos orphan 7" >> /etc/ntp.conf
+echo "tos orphan 7" >> /etc/ntp.conf
# Do not show error message on ntpdate failure. Customers should not be confused
# if admin node does not have access to the internet time servers.
@@ -571,4 +595,4 @@ cp -f /etc/skel/.bash* /root/
# Blacklist i2c_piix4 module for VirtualBox so it does not create kernel errors
[[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" > /etc/modprobe.d/blacklist-i2c-piix4.conf
-%end
+%end
\ No newline at end of file
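For orientation, the new --percent values split whatever space remains in the volume group after the fixed allocations: 30% to /var, 20% to /var/lib/docker, 50% to /var/log. A rough illustration (the swap figure is an assumption; Anaconda computes the real sizes):

    # Sketch: approximate split of remaining VG space on a minimum-size disk
    PV_MB=41000; ROOT_MB=10000; SWAP_MB=4000   # swap is --recommended; 4000 is a guess
    FREE_MB=$(( PV_MB - ROOT_MB - SWAP_MB ))
    echo "/var:            $(( FREE_MB * 30 / 100 )) MB"
    echo "/var/lib/docker: $(( FREE_MB * 20 / 100 )) MB"
    echo "/var/log:        $(( FREE_MB * 50 / 100 )) MB"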
diff --git a/fuel/build/f_isoroot/f_kscfg/ks.cfg.orig b/fuel/build/f_isoroot/f_kscfg/ks.cfg.orig
index bddf99c87..cf8cf80d1 100644
--- a/fuel/build/f_isoroot/f_kscfg/ks.cfg.orig
+++ b/fuel/build/f_isoroot/f_kscfg/ks.cfg.orig
@@ -26,10 +26,12 @@ skipx
drives=""
removable_drives=""
for drv in `ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`; do
- if (grep -q 0 /sys/block/${drv}/removable); then
- drives="${drives} ${drv}"
- else
- removable_drives="${removable_drives} ${drv}"
+ if !(blkid | grep -q "${drv}.*Fuel"); then
+ if (grep -q 0 /sys/block/${drv}/removable); then
+ drives="${drives} ${drv}"
+ else
+ removable_drives="${removable_drives} ${drv}"
+ fi
fi
done
default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
@@ -152,9 +154,9 @@ if [ "$format_confirmed" != "yes" ] ; then
chvt 1
fi
-# verify tgtdrive is at least 30GB
+# verify tgtdrive is at least 41GB
tgtdrivesize=$(( $(cat "/sys/class/block/${tgtdrive}/size") / 2 / 1024 ))
-if [ $tgtdrivesize -lt 30720 ]; then
+if [ $tgtdrivesize -lt 41984 ]; then
exec < /dev/tty3 > /dev/tty3 2>&1
chvt 3
clear
@@ -162,7 +164,7 @@ if [ $tgtdrivesize -lt 30720 ]; then
echo '********************************************************************'
echo '* E R R O R *'
echo '* *'
- echo '* Your disk is under 30GB in size. Installation cannot continue. *'
+ echo '* Your disk is under 41GB in size. Installation cannot continue. *'
echo '* Restart installation with a larger disk. *'
echo '* *'
echo '********************************************************************'
@@ -175,7 +177,9 @@ fi
tgtdrive=$(echo $tgtdrive | sed -e 's/!/\//')
# source
-if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
+ echo "harddrive --partition=LABEL="OpenStack_Fuel" --dir=/" > /tmp/source.ks
+elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
echo "harddrive --partition=UUID=will_be_substituted_with_actual_uuid --dir=/" > /tmp/source.ks
else
echo "cdrom" > /tmp/source.ks
@@ -204,16 +208,18 @@ else
fi
echo > /tmp/partition.ks
echo "partition /boot --onpart=/dev/${bootdev}3" >> /tmp/partition.ks
-echo "partition pv.001 --ondisk=${tgtdrive} --size=30000 --grow" >> /tmp/partition.ks
+echo "partition /boot/efi --onpart=/dev/${bootdev}2" >> /tmp/partition.ks
+echo "partition pv.001 --ondisk=${tgtdrive} --size=41000 --grow" >> /tmp/partition.ks
echo "volgroup os pv.001" >> /tmp/partition.ks
echo "logvol swap --vgname=os --recommended --name=swap" >> /tmp/partition.ks
echo "logvol / --vgname=os --size=10000 --name=root --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var --vgname=os --size=10000 --percent 60 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
-echo "logvol /var/log --vgname=os --size=4096 --percent 40 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var --vgname=os --size=10000 --percent 30 --grow --name=var --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/lib/docker --vgname=os --size=17000 --percent 20 --grow --name=varlibdocker --fstype=ext4" >> /tmp/partition.ks
+echo "logvol /var/log --vgname=os --size=4096 --percent 50 --grow --name=varlog --fstype=ext4" >> /tmp/partition.ks
# bootloader
-echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
+echo "bootloader --location=partition --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
# Anaconda can not install grub 0.97 on disks which are >4T.
# The reason is that grub does not support such large geometries
@@ -233,6 +239,9 @@ echo "cat /tmp/grub.script | chroot /mnt/sysimage /sbin/grub --no-floppy --batch
%packages --nobase --excludedocs
@Core
+fuel
+fuel-library
+fuel-dockerctl
authconfig
bind-utils
cronie
@@ -241,7 +250,12 @@ curl
daemonize
dhcp
docker-io
+fuel-bootstrap-image
+fuel-createmirror
+fuel-target-centos-images
+fuel-package-updates
fuelmenu
+fuel-docker-images
gdisk
lrzip
lsof
@@ -249,8 +263,10 @@ man
mlocate
nmap-ncat
ntp
+ntpdate
openssh-clients
policycoreutils
+python-daemon
rsync
ruby21-puppet
ruby21-rubygem-netaddr
@@ -266,6 +282,7 @@ vim-enhanced
virt-what
wget
yum
+yum-plugin-priorities
%include /tmp/post_partition.ks
@@ -285,6 +302,8 @@ echo -e "* soft core unlimited\n* hard core unlimited" >> /etc/security/limits.c
%post --nochroot --log=/mnt/sysimage/root/anaconda-post-before-chroot.log
#!/bin/sh
+set -x
+
SOURCE="/mnt/sysimage/tmp/source"
for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
@@ -342,6 +361,7 @@ function save_cfg {
else
echo GATEWAY=$gw >> /etc/sysconfig/network
fi
+ [ -n "$build_images" -a "$build_images" != "0" ] && echo -e "$build_images" > /root/.build_images
}
# Default FQDN
@@ -356,6 +376,7 @@ gw=$gw
device="eth0"
hwaddr=`ifconfig $device | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
dhcp_interface=$dhcp_interface
+build_images=$build_images
save_cfg
# Mounting installation source
@@ -366,7 +387,9 @@ echo
mkdir -p ${SOURCE}
mkdir -p ${FS}
-if test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
+if test -e /dev/disk/by-label/"OpenStack_Fuel"; then
+ mount /dev/disk/by-label/"OpenStack_Fuel" ${SOURCE}
+elif test -e /dev/disk/by-uuid/will_be_substituted_with_actual_uuid; then
mount /dev/disk/by-uuid/will_be_substituted_with_actual_uuid ${FS}
mount -o loop ${FS}/nailgun.iso ${SOURCE}
fi
@@ -390,61 +413,47 @@ cp ${SOURCE}/.treeinfo ${repodir}/centos/x86_64
# Copying Ubuntu files
mkdir -p ${repodir}/ubuntu/x86_64/images
-cp -r ${SOURCE}/ubuntu/conf ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/db ${repodir}/ubuntu/x86_64
cp -r ${SOURCE}/ubuntu/dists ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/indices ${repodir}/ubuntu/x86_64
cp -r ${SOURCE}/ubuntu/pool ${repodir}/ubuntu/x86_64
-cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux ${repodir}/ubuntu/x86_64/images
-cp -r ${SOURCE}/ubuntu/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz ${repodir}/ubuntu/x86_64/images
+
+# We do not ship debian-installer kernel and initrd on ISO.
+# But we still need to be able to create ubuntu cobbler distro
+# which requires kernel and initrd to be available. So, we
+# just touch these files to work around cobbler's limitation.
+touch ${repodir}/ubuntu/x86_64/images/linux
+touch ${repodir}/ubuntu/x86_64/images/initrd.gz
# make links for backward compatibility
ln -s ${repodir}/centos ${wwwdir}/centos
ln -s ${repodir}/ubuntu ${wwwdir}/ubuntu
-# Copying bootstrap image
-mkdir -p ${wwwdir}/bootstrap
-cp -r ${SOURCE}/bootstrap/initramfs.img ${wwwdir}/bootstrap
-cp -r ${SOURCE}/bootstrap/linux ${wwwdir}/bootstrap
-
-# Copying target images
-cp -r ${SOURCE}/targetimages ${wwwdir}
-
-mkdir -p /root/.ssh
-chmod 700 /root/.ssh
-cp ${SOURCE}/bootstrap/bootstrap.rsa /root/.ssh
-chmod 600 /root/.ssh/bootstrap.rsa
-
# --------------------------
# UNPACKING PUPPET MANIFESTS
# --------------------------
# create folders
-mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
-mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
-rm -rf /etc/puppet/modules/
+#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/manifests/
+#mkdir -p /etc/puppet/${OPENSTACK_VERSION}/modules/
+#rm -rf /etc/puppet/modules/
# TODO(ikalnitsky): investigate why we need this
-cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
+#cp ${SOURCE}/puppet-slave.tgz ${wwwdir}/
# place modules and manifests
-tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
-cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
+#tar zxf ${SOURCE}/puppet-slave.tgz -C /etc/puppet/${OPENSTACK_VERSION}/modules
+#cp /etc/puppet/${OPENSTACK_VERSION}/modules/osnailyfacter/examples/site.pp /etc/puppet/${OPENSTACK_VERSION}/manifests/site.pp
cp ${SOURCE}/centos-versions.yaml ${SOURCE}/ubuntu-versions.yaml /etc/puppet/${OPENSTACK_VERSION}/manifests/
# make links for backward compatibility
-pushd /etc/puppet
-ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
-ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
-popd
+#pushd /etc/puppet
+#ln -s ${OPENSTACK_VERSION}/manifests/ /etc/puppet/manifests
+#ln -s ${OPENSTACK_VERSION}/modules/ /etc/puppet/modules
+#popd
cp ${SOURCE}/send2syslog.py /bin/send2syslog.py
mkdir -p /var/lib/hiera
touch /var/lib/hiera/common.yaml /etc/puppet/hiera.yaml
-# Deploy docker images and ctl tools if we built ISO with docker containers support
-[ -d "${SOURCE}/docker" ] && cp -r ${SOURCE}/docker ${wwwdir}/docker
-
# Prepare local repository specification
rm /etc/yum.repos.d/CentOS*.repo
cat > /etc/yum.repos.d/nailgun.repo << EOF
@@ -461,6 +470,24 @@ sed -i 's/^enabled.*/enabled=0/' /etc/yum/pluginconf.d/subscription-manager.conf
# Disable GSSAPI in ssh server config
sed -i -e "/^\s*GSSAPICleanupCredentials yes/d" -e "/^\s*GSSAPIAuthentication yes/d" /etc/ssh/sshd_config
+# Enable MOTD banner in sshd
+sed -i -e "s/^\s*PrintMotd no/PrintMotd yes/g" /etc/ssh/sshd_config
+
+# Add note regarding local repos creation to MOTD
+cat >> /etc/motd << EOF
+
+All environments use online repositories by default.
+Use the following commands to create local repositories
+on master node and change default repository settings:
+
+* CentOS: fuel-package-updates (see --help for options)
+* Ubuntu: fuel-createmirror (see --help for options)
+
+Please refer to the following guide for more information:
+https://docs.mirantis.com/openstack/fuel/fuel-6.1/reference-architecture.html#fuel-rep-mirror
+
+EOF
+
# Copying bootstrap_admin_node.sh, chmod it and
# adding /etc/init/bootstrap_admin_node.conf
cp ${SOURCE}/bootstrap_admin_node.sh /usr/local/sbin/bootstrap_admin_node.sh
@@ -540,10 +567,7 @@ rm -rf ${SOURCE}
umount -f ${FS} || true
rm -rf ${FS}
-# Enabling/configuring NTPD and ntpdate services
-echo "server 127.127.1.0" >> /etc/ntp.conf
-echo "fudge 127.127.1.0 stratum 10" >> /etc/ntp.conf
-echo "tos orphan 7" >> /etc/ntp.conf
+echo "tos orphan 7" >> /etc/ntp.conf
# Do not show error message on ntpdate failure. Customers should not be confused
# if admin node does not have access to the internet time servers.
diff --git a/fuel/build/f_isoroot/f_odlpluginbuild/Makefile b/fuel/build/f_isoroot/f_odlpluginbuild/Makefile
new file mode 100644
index 000000000..ce9cd73e1
--- /dev/null
+++ b/fuel/build/f_isoroot/f_odlpluginbuild/Makefile
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# mskalski@mirantis.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+TOP := $(shell pwd)
+DOCKNAME = fuelrepo
+DOCKVERSION = 1.0
+ODL_BRANCH="juno/lithium-sr1"
+ODL_REPO="https://github.com/stackforge/fuel-plugin-opendaylight.git"
+
+.PHONY: all
+all: .odlbuild
+
+.PHONY: clean
+clean:
+	# Deliberately keeping the locally built plugin to speed up multiple builds
+ @rm -f ../release/opnfv/opendaylight*.rpm
+
+.PHONY: release
+release:.odlbuild
+ @rm -f ../release/opnfv/opendaylight*.rpm
+ @mkdir -p ../release/opnfv
+ @cp opendaylight*.rpm ../release/opnfv/
+
+.odlbuild:
+ rm -rf fuel-plugin-opendaylight
+ sudo apt-get -y install build-essential ruby-dev rubygems-integration python-pip git rpm createrepo dpkg-dev
+ sudo gem install fpm
+ sudo pip install fuel-plugin-builder
+ git clone -b ${ODL_BRANCH} ${ODL_REPO}
+ INCLUDE_DEPENDENCIES=true fpb --debug --build fuel-plugin-opendaylight/
+ mv fuel-plugin-opendaylight/opendaylight*.rpm .
+	rm -rf fuel-plugin-opendaylight
+	touch .odlbuild
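Run outside make, the .odlbuild recipe amounts to the following, using the branch and repo defined at the top of the Makefile (build dependencies as installed by the recipe are assumed present):

    # Sketch: manual equivalent of the .odlbuild recipe
    sudo pip install fuel-plugin-builder
    git clone -b juno/lithium-sr1 https://github.com/stackforge/fuel-plugin-opendaylight.git
    INCLUDE_DEPENDENCIES=true fpb --debug --build fuel-plugin-opendaylight/
    ls fuel-plugin-opendaylight/opendaylight*.rpm   # the built artifact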
diff --git a/fuel/build/f_isoroot/f_predeployment/Makefile b/fuel/build/f_isoroot/f_predeployment/Makefile
deleted file mode 100644
index a5252df96..000000000
--- a/fuel/build/f_isoroot/f_predeployment/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
- @mkdir -p release/opnfv
- @cp pre-deploy.sh release/opnfv
- @cp sysinfo.sh release/opnfv
- @cp transform_yaml.py release/opnfv
- @chmod 755 release/opnfv/*
-
-.PHONY: clean
-clean:
- @rm -rf release
-
-
-.PHONY: release
-release:clean all
- @cp -Rvp release/* ../release
diff --git a/fuel/build/f_isoroot/f_predeployment/README b/fuel/build/f_isoroot/f_predeployment/README
deleted file mode 100644
index 3eef9f216..000000000
--- a/fuel/build/f_isoroot/f_predeployment/README
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This is the start of the interactive frontend that adds OPNFV configuration into
-the astute.yaml of the nodes. It is currently just a test setup: prepare an
-installation up to the point of "deploy changes", but run "./pre-deploy.sh <envid> fragment.yaml"
-before actually hitting deploy, which makes sure the example fragment is added to
-the nodes.
-
-Note that the hosts part is the only part of fragment.yaml that is actually
-acted on at this time.
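In practice the flow described above looks like this on the Fuel master (env id 1 and the install path are examples; the /opt/opnfv path mirrors the transform_yaml.py reference inside the script, and with a single environment the argument can be omitted):

    # Sketch: apply the OPNFV fragment before hitting "deploy changes"
    fuel env --list              # note the environment id
    /opt/opnfv/pre-deploy.sh 1   # interactive; builds and uploads the fragment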
diff --git a/fuel/build/f_isoroot/f_predeployment/pre-deploy.sh b/fuel/build/f_isoroot/f_predeployment/pre-deploy.sh
deleted file mode 100755
index c5c6c42c0..000000000
--- a/fuel/build/f_isoroot/f_predeployment/pre-deploy.sh
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/bin/bash -e
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-
-error_exit () {
- echo "$@" >&2
- exit 1
-}
-
-get_env() {
- local env_id=${1:-""}
-
- if [ -z $env_id ]; then
- local n_envs=$(fuel env --list | grep -v -E "^id|^--|^ *$" | wc -l)
- if [ $n_envs -ne 1 ]; then
- echo "Usage: $0 [<env-id>]" >&2
- error_exit "If only a single environment is present it can be left" \
- "out. Otherwise the environment must be selected"
- fi
- env_id=$(fuel env --list | grep -v -E "^id|^--" | awk '{print $1}')
- else
- if ! fuel --env $env_id environment 2>/dev/null grep -v -E "^id|^--" | \
- grep -q ^$env_id; then
- error_exit "No such environment ID: $env_id"
- fi
- fi
- echo $env_id
-}
-
-get_node_uid () {
- cat $1 | grep "^uid: " | sed "s/^uid: '//" | sed "s/'$//"
-}
-
-get_node_role () {
- cat $1 | grep "^role: " | sed "s/^role: //"
-}
-
-get_next_cic () {
- file=$1
-
- last=`cat $file | sed 's/.*://' | grep "cic-" | sed 's/cic\-.*sl//' | sort -n | tail -1`
- if [ -z "$last" ]; then
- next=1
- else
- next=$[$last + 2]
- fi
- echo $next
-}
-
-get_next_compute () {
- file=$1
-
- last=`cat $file | sed 's/.*://' | grep "cmp-" | sed 's/cmp\-.*sl//' | sort -n | tail -1`
- if [ -z "$last" ]; then
- next=7
- else
- next=$[$last + 2]
- fi
- echo $next
-}
-
-modify_hostnames () {
- env=$1
- file=$2
- for line in `cat $file`
- do
- old=`echo $line | sed 's/:.*//'`
- new=`echo $line | sed 's/.*://'`
- echo "Applying: $old -> $new"
-
- for dfile in deployment_$env/*.yaml
- do
- sed -i "s/$old/$new/g" $dfile
- done
-
- for pfile in provisioning_$env/*.yaml
- do
- sed -i "s/$old/$new/g" $pfile
- done
- done
-}
-
-setup_hostnames () {
- ENV=$1
- cd ${CONFIGDIR}
- touch hostnames.$ENV
-
- for dfile in deployment_$ENV/*.yaml
- do
- uid=`get_node_uid $dfile`
- hostname=`grep "^node-$uid:" hostnames.$ENV | sed 's/.*://'`
- if [ -z $hostname ]; then
-
- pfile=provisioning_$ENV/node-$uid.yaml
- role=`get_node_role $dfile`
-
- case $role in
- primary-controller)
- hostname="cic-pod0-sh0-sl`get_next_cic hostnames.$ENV`"
- ;;
- controller)
- hostname="cic-pod0-sh0-sl`get_next_cic hostnames.$ENV`"
- ;;
- compute)
- hostname="cmp-pod0-sh0-sl`get_next_compute hostnames.$ENV`"
- ;;
- *)
- echo "Unknown node type for UID $uid"
- exit 1
- ;;
- esac
-
- echo "node-$uid:$hostname" >> hostnames.$ENV
- else
- echo "Already got hostname $hostname for node-$uid"
-
- fi
- done
-
- rm -f hostnames.$ENV.old
- mv hostnames.$ENV hostnames.$ENV.old
- sort hostnames.$ENV.old | uniq > hostnames.$ENV
- modify_hostnames $ENV hostnames.$ENV
-}
-
-
-
-get_provisioning_info () {
- ENV=$1
- mkdir -p ${CONFIGDIR}
- cd ${CONFIGDIR}
- rm -Rf provisioning_$ENV
- echo "Getting provisioning info..."
- fuel --env $ENV provisioning --default
- if [ $? -ne 0 ]; then
- echo "Error: Could not get provisioning info for env $ENV">&2
- exit 1
- fi
-}
-
-get_deployment_info () {
- ENV=$1
- mkdir -p ${CONFIGDIR}
- cd ${CONFIGDIR}
- rm -Rf deployment_$ENV
- echo "Getting deployment info..."
- fuel --env $ENV deployment --default
- if [ $? -ne 0 ]; then
- echo "Error: Could not get deployment info for env $ENV">&2
- exit 1
- fi
-}
-
-transform_yaml () {
- ENV=$1
- cd ${CONFIGDIR}
- for dfile in deployment_$ENV/*.yaml
- do
- /opt/opnfv/transform_yaml.py $dfile
- done
-}
-
-commit_changes () {
- ENV=$1
- cd ${CONFIGDIR}
-
- fuel --env $ENV deployment --upload
- fuel --env $ENV provisioning --upload
-}
-
-add_yaml_fragment () {
- ENV=$1
- FRAGMENT=${CONFIGDIR}/fragment.yaml.$ENV
-
- cd ${CONFIGDIR}
- for dfile in deployment_$ENV/*.yaml
- do
- cnt=`grep "^opnfv:" $dfile | wc -l `
- if [ $cnt -eq 0 ]; then
- echo "Adding fragment to $dfile"
- cat $FRAGMENT >> $dfile
- else
- echo "Already have fragment in $dfile"
- fi
- done
-}
-
-
-ip_valid() {
- IP_ADDRESS="$1"
- # Check if the format looks right
- echo "$IP_ADDRESS" | egrep -qE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' || return 1
- # Check that each octet is less than or equal to 255:
- echo $IP_ADDRESS | awk -F'.' '$1 <=255 && $2 <= 255 && $3 <=255 && $4 <= 255 {print "Y" } ' | grep -q Y || return 1
- return 0
-}
-
-
-generate_ntp_entry() {
- FILE=$1
- read -p "NTP server:" NTP_SERVER
- if [ -z "$NTP_SERVER" ]; then
- return 1
- elif confirm_yes "Are you sure you want to add this entry (y/n): "; then
- echo "Confirmed"
- echo " server $NTP_SERVER" >> $FILE
- fi
-}
-
-generate_hostfile_entry() {
- FILE=$1
- read -p "Name:" HOST_NAME
- if [ -z "$HOST_NAME" ]; then
- return 1
- else
- read -p "FQDN:" HOST_FQDN
- read -p "IP: " HOST_IP
- while ! ip_valid "$HOST_IP"
- do
- echo "This is not a valid IP! Try again."
- read -p "IP: " HOST_IP
- done
- fi
- if confirm_yes "Are you sure you want to add this entry (y/n): "; then
- echo "Confirmed"
- echo " - name: $HOST_NAME" >> $FILE
- echo " address: $HOST_IP" >> $FILE
- echo " fqdn: $HOST_FQDN" >> $FILE
- else
- echo "Not confirmed"
- fi
- return 0
-}
-
-generate_dns_entry() {
- FILE=$1
- PROMPT=$2
- read -p "${PROMPT}:" DNS_IP
- if [ -z "$DNS_IP" ]; then
- return 1
- else
- while ! ip_valid "$DNS_IP"
- do
- echo "This is not a valid IP! Try again."
- read -p "${PROMPT}: " DNS_IP
- done
- fi
- if confirm_yes "Are you sure you want to add this entry (y/n): "; then
- echo "Confirmed"
- echo " - $DNS_IP" >> $FILE
- else
- echo "Not confirmed"
- fi
- return 0
-}
-
-confirm_yes() {
- prompt=$1
- while true
- do
- read -p "$prompt" YESNO
- case $YESNO in
- [Yy])
- return 0
- ;;
- [Nn])
- return 1
- ;;
- esac
- done
-}
-
-generate_yaml_fragment() {
- ENV=$1
- FRAGMENT=${CONFIGDIR}/fragment.yaml.$ENV
-
- if [ -f $FRAGMENT ]; then
- echo "Manual configuration already performed, reusing previous data from $FRAGMENT."
- echo "Press return to continue or ^C to stop."
- read ans
- return
- fi
-
- echo "opnfv:" > ${FRAGMENT}
-
- clear
- echo -e "\n\nPre-deployment configuration\n\n"
-
- echo -e "\n\nIPs for the DNS servers to go into /etc/resolv.conf. You will be"
- echo -e "prompted for one IP at the time. Press return on an empty line"
- echo -e "to complete your input. If no DNS server is specified, the IP of"
- echo -e "the Fuel master will be used instead.\n"
-
- DNSCICYAML=${CONFIGDIR}/cicdns.yaml.$ENV
- rm -f $DNSCICYAML
-
- echo -e "\n\n"
-
- while generate_dns_entry $DNSCICYAML "IP for CIC name servers"
- do
- :
- done
-
- if [ -f $DNSCICYAML ]; then
- echo " dns:" >> $FRAGMENT
- echo " controller:" >> $FRAGMENT
- cat $DNSCICYAML >> $FRAGMENT
- fi
-
-
- DNSCMPYAML=${CONFIGDIR}/cmpdns.yaml.$ENV
- rm -f $DNSCMPYAML
-
- echo -e "\n\n"
-
- while generate_dns_entry $DNSCMPYAML "IP for compute node name servers"
- do
- :
- done
-
-
- if [ -f $DNSCMPYAML ]; then
- if [ ! -f $DNSCICYAML ]; then
- echo " dns:" >> $FRAGMENT
- fi
- echo " compute:" >> $FRAGMENT
- cat $DNSCMPYAML >> $FRAGMENT
- fi
-
- echo -e "\n\nHosts file additions for controllers and compute nodes. You will be"
- echo -e "prompted for name, FQDN and IP for each entry. Press return when prompted"
- echo -e "for a name when you have completed your input.\n"
-
-
- HOSTYAML=${CONFIGDIR}/hosts.yaml.$ENV
- rm -f $HOSTYAML
- while generate_hostfile_entry $HOSTYAML
- do
- :
- done
-
- if [ -f $HOSTYAML ]; then
- echo " hosts:" >> $FRAGMENT
- cat $HOSTYAML >> $FRAGMENT
- fi
-
- echo -e "\n\nNTP upstream configuration for controllers.You will be"
- echo -e "prompted for a NTP server each entry. Press return when prompted"
- echo -e "for a NTP serverwhen you have completed your input.\n"
-
-
- NTPYAML=${CONFIGDIR}/ntp.yaml.$ENV
- rm -f $NTPYAML
- while generate_ntp_entry $NTPYAML
- do
- :
- done
-
- if [ -f $NTPYAML ]; then
- echo " ntp:" >> $FRAGMENT
- echo " controller: |" >> $FRAGMENT
- cat $NTPYAML >> $FRAGMENT
-
- echo " compute: |" >> $FRAGMENT
- for ctl in `find $CONFIGDIR/deployment_$ENV -name '*controller*.yaml'`
- do
- fqdn=`grep "^fqdn:" $ctl | sed 's/fqdn: *//'`
- echo " server $fqdn" >> $FRAGMENT
- done
- fi
-
- # If nothing added make sure we get an empty opnfv hash
- # instead of a NULL hash.
- if [ $(wc -l $FRAGMENT | awk '{print $1}') -le 1 ]; then
- echo "opnfv: {}" >$FRAGMENT
- fi
-}
-
-ENV=$(get_env "$@")
-
-CONFIGDIR="/var/lib/opnfv"
-mkdir -p $CONFIGDIR
-
-get_deployment_info $ENV
-# Uncomment the below to enable the control_bond example
-#transform_yaml $ENV
-get_provisioning_info $ENV
-generate_yaml_fragment $ENV
-# The feature to change hostnames from node-<n> to cmp- or cic- is disabled.
-# To turn it on, uncomment the following line.
-#setup_hostnames $ENV
-add_yaml_fragment $ENV
-commit_changes $ENV
diff --git a/fuel/build/f_isoroot/f_predeployment/sysinfo.sh b/fuel/build/f_isoroot/f_predeployment/sysinfo.sh
deleted file mode 100755
index e99cac039..000000000
--- a/fuel/build/f_isoroot/f_predeployment/sysinfo.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-dockerctl shell cobbler cobbler system list | grep -v default | xargs -n 1 host | sort | sed 's/\..* /\t/'
diff --git a/fuel/build/f_isoroot/f_predeployment/transform_yaml.py b/fuel/build/f_isoroot/f_predeployment/transform_yaml.py
deleted file mode 100755
index 14eec4cc6..000000000
--- a/fuel/build/f_isoroot/f_predeployment/transform_yaml.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/python
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Remove control and management network transformations from file.
-# Only to be used together with f_control_bond_example (enable in
-# pre-deploy.sh)
-
-import yaml
-import re
-import sys
-import os
-
-if len(sys.argv) != 2:
- sys.stderr.write("Usage: "+sys.argv[0]+" <filename>\n")
- sys.exit(1)
-
-filename = sys.argv[1]
-if not os.path.exists(filename):
- sys.stderr.write("ERROR: The file "+filename+" could not be opened\n")
- sys.exit(1)
-
-ignore_values = [ "eth0", "eth1", "br-mgmt", "br-fw-admin" ]
-
-infile = open(filename, 'r')
-doc = yaml.load(infile)
-infile.close()
-
-out={}
-
-for scheme in doc:
- if scheme == "network_scheme":
- mytransformation = {}
- for operation in doc[scheme]:
- if operation == "transformations":
- # We need the base bridges for l23network to be happy,
- # remove everything else.
- mytrans = [ { "action": "add-br", "name": "br-mgmt" },
- { "action": "add-br", "name": "br-fw-admin" } ]
- for trans in doc[scheme][operation]:
- delete = 0
- for ignore in ignore_values:
- matchObj = re.search(ignore,str(trans))
- if matchObj:
- delete = 1
- if delete == 0:
- mytrans.append(trans)
- else:
- pass
- #print "Deleted", trans
-
- mytransformation[operation] = mytrans
- else:
- mytransformation[operation] = doc[scheme][operation]
- out[scheme] = mytransformation
- else:
- out[scheme] = doc[scheme]
-
-outfile = open(filename, 'w')
-outfile.write(yaml.dump(out, default_flow_style=False))
-outfile.close()
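The script is wired in via the commented-out transform_yaml call in pre-deploy.sh above; run by hand it takes one deployment yaml at a time (node-1 is a placeholder):

    # Sketch: strip control/management transformations from a node's yaml
    /opt/opnfv/transform_yaml.py deployment_1/node-1.yaml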
diff --git a/fuel/build/f_isoroot/f_repobuild/Makefile b/fuel/build/f_isoroot/f_repobuild/Makefile
new file mode 100644
index 000000000..6bfbd35c1
--- /dev/null
+++ b/fuel/build/f_isoroot/f_repobuild/Makefile
@@ -0,0 +1,56 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+SHELL := /bin/bash
+TOP := $(shell pwd)
+DOCKNAME = fuelrepo
+DOCKVERSION = 1.0
+
+# try to choose close ubuntu mirror which support rsync protocol
+# https://bugs.launchpad.net/fuel/+bug/1459252
+MIRROR_URLS := $(shell curl -s http://mirrors.ubuntu.com/mirrors.txt)
+MIRROR_HOSTS := $(shell for url in ${MIRROR_URLS}; do echo $$url | cut -d'/' -f3; done)
+RSYNC_HOST := $(shell for host in ${MIRROR_HOSTS}; do rsync -4 --contimeout 5 --no-motd --list-only "$${host}::ubuntu/." &> /dev/null && echo $$host && break; done)
+
+.PHONY: all
+all: .nailgun
+
+.nailgun:
+ sudo apt-get update
+ sudo apt-get upgrade -y
+ sudo apt-get install -y rsync python python-yaml dpkg-dev openssl
+ rm -rf tmpiso tmpdir
+ mkdir tmpiso
+ fuseiso ${ISOCACHE} tmpiso
+ cp tmpiso/ubuntu/pool/main/f/fuel-createmirror/fuel-createmirror_6.1*.deb .
+ fusermount -u tmpiso
+ rm -rf tmpiso
+ sudo dpkg -i fuel-createmirror_6.1*.deb
+ sudo sed -i 's/DOCKER_MODE=true/DOCKER_MODE=false/' /etc/fuel-createmirror/common.cfg
+ sudo sed -i 's/DEBUG="no"/DEBUG="yes"/' /etc/fuel-createmirror/ubuntu.cfg
+ sudo sed -i 's/MIRROR_UBUNTU_HOST="archive.ubuntu.com"/MIRROR_UBUNTU_HOST="${RSYNC_HOST}"/' /etc/fuel-createmirror/common.cfg
+ rm -Rf nailgun
+ sudo mkdir -p /var/www
+ sudo su - -c /opt/fuel-createmirror-6.1/fuel-createmirror
+ sudo chmod -R 755 /var/www/nailgun
+ cp -Rp /var/www/nailgun .
+ touch .nailgun
+
+.PHONY: clean
+clean:
+ # Deliberately not cleaning nailgun directory to speed up multiple builds
+ @rm -rf ../release/opnfv/nailgun fuel-createmirror_6.1*.deb
+
+.PHONY: release
+release:.nailgun
+ @rm -Rf ../release/opnfv/nailgun
+ @mkdir -p ../release/opnfv
+ @cp -Rp nailgun ../release/opnfv/nailgun
+
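The mirror probing at the top of this Makefile, as a standalone shell sketch:

    # Sketch: pick the first Ubuntu mirror that answers on the rsync protocol
    for url in $(curl -s http://mirrors.ubuntu.com/mirrors.txt); do
        host=$(echo "$url" | cut -d'/' -f3)
        if rsync -4 --contimeout 5 --no-motd --list-only "${host}::ubuntu/." >/dev/null 2>&1; then
            echo "RSYNC_HOST=$host"
            break
        fi
    done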
diff --git a/fuel/build/f_l23network/Makefile b/fuel/build/f_l23network/Makefile
deleted file mode 100644
index 0949737bc..000000000
--- a/fuel/build/f_l23network/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_l23network/README b/fuel/build/f_l23network/README
deleted file mode 100644
index 9aa4718a2..000000000
--- a/fuel/build/f_l23network/README
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-An addition for managing extra /etc/hosts entries through Astute.
-
-The astute.yaml file should contain entries as those below to have them picked up during deployment:
-
-opnfv:
- hosts:
- - name: test1
- address: 192.168.100.100
- fqdn: test1.opnfv.org
- - name: test2
- address: 192.168.100.101
- fqdn: test2.opnfv.org
- - name: test3
- address: 192.168.100.102
- fqdn: test3.opnfv.org
-
-The suggested method for adding this information is to prepare for deployment with the Fuel GUI or CLI,
-but before actually deploying:
-
-1. Download the current deployment for all hosts: fuel --env 1 deployment --default
-2. Iterate through the hosts in "deployment_1" and add hosts configuration in the above format to their
- respective yaml file.
-3. Upload the modified deployment information: fuel --env 1 deployment --upload
-
-After deploying, the additions will be included in /etc/astute.yaml of each host.
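Step 2 above is scriptable; a sketch that appends a prepared fragment file (holding the opnfv/hosts block shown above) to every node's yaml, skipping nodes that already carry one, mirroring the add_yaml_fragment logic in pre-deploy.sh:

    # Sketch: add the opnfv:hosts fragment to every node yaml in deployment_1
    fuel --env 1 deployment --default
    for f in deployment_1/*.yaml; do
        grep -q '^opnfv:' "$f" || cat fragment.yaml >> "$f"
    done
    fuel --env 1 deployment --upload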
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb b/fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb
deleted file mode 100644
index 33bfad8fd..000000000
--- a/fuel/build/f_l23network/puppet/modules/l23network/lib/puppet/parser/functions/extras_to_hosts.rb
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# array_or_string_to_array.rb
-#
-
-module Puppet::Parser::Functions
- newfunction(:extras_to_hosts, :type => :rvalue, :doc => <<-EOS
- convert extras array passed from Astute into
- hash for puppet `host` create_resources call
- EOS
- ) do |args|
- hosts=Hash.new
- extras=args[0]
- extras.each do |extras|
- hosts[extras['name']]={:ip=>extras['address'],:host_aliases=>[extras['fqdn']]}
- notice("Generating extras host entry #{extras['name']} #{extras['address']} #{extras['fqdn']}")
- end
- return hosts
- end
-end
-
-# vim: set ts=2 sw=2 et :
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp b/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp
deleted file mode 100644
index 05cff8d60..000000000
--- a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp
+++ /dev/null
@@ -1,18 +0,0 @@
-class l23network::hosts_file (
- $nodes,
- $extras=[],
- $hosts_file = "/etc/hosts"
-) {
-
- # OPNFV addition: Add additional lines in /etc/hosts through Astute additions
-
- $host_resources = nodes_to_hosts($nodes)
- $extras_host_resources = extras_to_hosts($extras)
- Host {
- ensure => present,
- target => $hosts_file
- }
-
- create_resources(host, $host_resources)
- create_resources(host, $extras_host_resources)
-}
diff --git a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig b/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig
deleted file mode 100644
index 2295e3f9a..000000000
--- a/fuel/build/f_l23network/puppet/modules/l23network/manifests/hosts_file.pp.orig
+++ /dev/null
@@ -1,16 +0,0 @@
-class l23network::hosts_file (
- $nodes,
- $hosts_file = "/etc/hosts"
-) {
-
- #Move original hosts file
-
- $host_resources = nodes_to_hosts($nodes)
-
- Host {
- ensure => present,
- target => $hosts_file
- }
-
- create_resources(host, $host_resources)
-}
diff --git a/fuel/build/f_l23network/testing/README b/fuel/build/f_l23network/testing/README
deleted file mode 100644
index b68eddf22..000000000
--- a/fuel/build/f_l23network/testing/README
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-In order to test the functionality without performing a full deployment, run "puppet apply" on the fake_init.pp
-which will call only the l23network::hosts_file class.
diff --git a/fuel/build/f_l23network/testing/fake_init.pp b/fuel/build/f_l23network/testing/fake_init.pp
deleted file mode 100644
index bc6b163f9..000000000
--- a/fuel/build/f_l23network/testing/fake_init.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-if $::fuel_settings['nodes'] {
- $nodes_hash = $::fuel_settings['nodes']
- $extras_hash = $::fuel_settings['opnfv']['hosts']
-
- class {'l23network::hosts_file':
- nodes => $nodes_hash,
- extras => $extras_hash
- }
-
- include l23network::hosts_file
-}
diff --git a/fuel/build/f_ntp/Makefile b/fuel/build/f_ntp/Makefile
deleted file mode 100644
index 0949737bc..000000000
--- a/fuel/build/f_ntp/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_ntp/README b/fuel/build/f_ntp/README
deleted file mode 100644
index 2bade72d6..000000000
--- a/fuel/build/f_ntp/README
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-An addition that provides separate ntp.conf contents for compute hosts and controller hosts through Astute.
-
-The astute.yaml file should contain entries as those below to have them picked up during deployment:
-
-opnfv:
- ntp:
- controller: |
- line 1
- line 2
- compute: |
- line 1
- line 2
-
-The suggested method for adding this information is to prepare for deployment with the Fuel GUI or CLI,
-but before actually deploying:
-
-1. Download the current deployment for all hosts: fuel --env 1 deployment --default
-2. Iterate through the hosts in "deployment_1" and add hosts configuration in the above format to their
- respective yaml file.
-3. Upload the modified deployment information: fuel --env 1 deployment --upload
-
-After deploying, the additions will be included in /etc/astute.yaml of each host.
-
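After deployment, the result can be checked on any node (node-1 and the line count are placeholders):

    # Sketch: verify the fragment reached a node and ntp.conf was rendered
    ssh node-1 "grep -A4 '^opnfv:' /etc/astute.yaml"
    ssh node-1 "tail -n 5 /etc/ntp.conf"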
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
deleted file mode 100644
index c5dce1be0..000000000
--- a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp
+++ /dev/null
@@ -1,80 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Class: Ntp
-#
-# Add Ntp content passed through astute.yaml into ntp.conf depending on the role
-#
-# Suitable yaml content:
-# <begin>
-# opnfv:
-# ntp:
-# controller: |
-# line 1
-# line 2
-# compute: |
-# line 1
-# line 2
-# <end>
-#
-#
-#
-
-class opnfv::ntp(
- $file='/etc/ntp.conf'
-) {
-
- case $::operatingsystem {
- centos, redhat: {
- $service_name = 'ntpd'
- }
- debian, ubuntu: {
- $service_name = 'ntp'
- }
- }
-
- if $::fuel_settings['role'] {
- if ($::fuel_settings['opnfv'] and
- $::fuel_settings['opnfv']['ntp']) {
- case $::fuel_settings['role'] {
- /controller/: {
- if $::fuel_settings['opnfv']['ntp']['controller'] {
- $template = 'opnfv/ntp.conf.controller.erb'
- $file_content = $::fuel_settings['opnfv']['ntp']['controller']
- }
- }
- /compute/: {
- if $::fuel_settings['opnfv']['ntp']['compute'] {
- $template = 'opnfv/ntp.conf.compute.erb'
- $file_content = $::fuel_settings['opnfv']['ntp']['compute']
- }
- }
- }
- }
- }
-
- if $file_content {
- package { 'ntp':
- ensure => installed,
- }
-
- file { $file:
- content => template($template),
- notify => Service['ntp'],
- }
-
- service { 'ntp':
- ensure => running,
- name => $service_name,
- enable => true,
- require => [ Package['ntp'], File[$file]]
- }
- }
-}
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb b/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb
deleted file mode 100644
index 37ecfd72d..000000000
--- a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.compute.erb
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-tinker panic 0
-driftfile /var/lib/ntp/ntp.drift
-statistics loopstats peerstats clockstats
-filegen loopstats file loopstats type day enable
-filegen peerstats file peerstats type day enable
-filegen clockstats file clockstats type day enable
-restrict -4 default kod notrap nomodify nopeer noquery
-restrict -6 default kod notrap nomodify nopeer noquery
-restrict 127.0.0.1
-restrict ::1
-<%= @file_content %>
diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb b/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb
deleted file mode 100644
index 37ecfd72d..000000000
--- a/fuel/build/f_ntp/puppet/modules/opnfv/templates/ntp.conf.controller.erb
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-tinker panic 0
-driftfile /var/lib/ntp/ntp.drift
-statistics loopstats peerstats clockstats
-filegen loopstats file loopstats type day enable
-filegen peerstats file peerstats type day enable
-filegen clockstats file clockstats type day enable
-restrict -4 default kod notrap nomodify nopeer noquery
-restrict -6 default kod notrap nomodify nopeer noquery
-restrict 127.0.0.1
-restrict ::1
-<%= @file_content %>
diff --git a/fuel/build/f_ntp/testing/README b/fuel/build/f_ntp/testing/README
deleted file mode 100644
index 6d80b0a94..000000000
--- a/fuel/build/f_ntp/testing/README
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-In order to test the functionality without performing a full deployment, run "puppet apply" on the
-fake_init.pp which will call only the opnfv::ntp class.
diff --git a/fuel/build/f_odl_docker/Makefile b/fuel/build/f_odl_docker/Makefile
deleted file mode 100755
index 6135e71e5..000000000
--- a/fuel/build/f_odl_docker/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-BUILDTAG := loving_daniel
-
-# Edit this to match the GENESIS / OPNFV in your environment
-export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv
-include ../config.mk
-
-.PHONY: all
-all:
- @mkdir -p puppet/modules/opnfv/odl_docker
- @rm -rf tmp
- @mkdir -p tmp
- @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile tmp/.
- @docker build -t ${BUILDTAG} tmp/dockerfile/.
- @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/odl_docker_image.tar
- @wget ${DOCKER_REPO}/${DOCKER_TAG} -O puppet/modules/opnfv/odl_docker/docker-latest
- @echo "OPFNV_PUPPET is: ${OPNFV_PUPPET}"
- @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile/container_scripts puppet/modules/opnfv
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: build-clean
-build-clean:
- @rm -rf tmp
- @rm -rf release
- @rm -rf puppet/modules/opnfv/odl_docker/odl_docker_image.tar
- @rm -rf puppet/modules/opnfv/odl_docker/docker-latest
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- # Fetch PP from OPNFV Common
- @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST}
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_odl_docker/dockerfile/Dockerfile b/fuel/build/f_odl_docker/dockerfile/Dockerfile
deleted file mode 100755
index e3c7ee5fe..000000000
--- a/fuel/build/f_odl_docker/dockerfile/Dockerfile
+++ /dev/null
@@ -1,72 +0,0 @@
-####################################################################
-#
-# Dockerfile to build a ODL (Karaf) Docker Container
-#
-# Copyright daniel.smith@ericsson.com
-# License: Apache GPL
-#
-####################################################################
-
-
-#Set the base image - note: the current release of Karaf is built against JDK 7 and a lot of 12.04, so we use 12.04 rather than 14.04 to avoid backporting a ton of stuff
-FROM ubuntu:12.04
-
-# Maintainer Info
-MAINTAINER Daniel Smith
-
-#Run apt-get update on start just to check for updates when building
-RUN echo "Updating APT"
-RUN apt-get update
-RUN echo "Adding wget"
-RUN apt-get install -y wget
-RUN apt-get install -y net-tools
-RUN apt-get install -y openjdk-7-jre
-RUN apt-get install -y openjdk-7-jdk
-RUN apt-get install -y openssh-server
-RUN apt-get install -y vim
-RUN apt-get install -y expect
-RUN apt-get install -y daemontools
-RUN mkdir -p /opt/odl_source
-RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
-
-
-#Now let's go and fetch the ODL distribution
-RUN echo "Fetching ODL"
-RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
-
-RUN echo "Untarring ODL inplace"
-RUN mkdir -p /opt/odl
-RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
-
-RUN echo "Installing DLUX and other features into ODL"
-COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
-COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
-RUN chmod 777 /etc/init.d/start_odl_docker.sh
-RUN chmod 777 /etc/init.d/speak.sh
-
-
-# Expose the ports
-# PORTS FOR BASE SYSTEM AND DLUX
-EXPOSE 8101
-EXPOSE 6633
-EXPOSE 1099
-EXPOSE 43506
-EXPOSE 8181
-EXPOSE 8185
-EXPOSE 9000
-EXPOSE 39378
-EXPOSE 33714
-EXPOSE 44444
-EXPOSE 6653
-
-# PORTS FOR OVSDB AND ODL CONTROL
-EXPOSE 12001
-EXPOSE 6640
-EXPOSE 8080
-EXPOSE 7800
-EXPOSE 55130
-EXPOSE 52150
-EXPOSE 36826
-
-# set the ENTRYPOINT - an entry point allows us to run this container as an executable
-CMD ["/etc/init.d/start_odl_docker.sh"]
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh
deleted file mode 100755
index 3e5d0b2bb..000000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/expect
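-# Sanity check: connect to the local Karaf client and list the
-# odl-restconf feature (assumes the default "root>" Karaf prompt)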
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:list | grep -i odl-restconf\r"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh
deleted file mode 100755
index 3ba07a844..000000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/expect
-# Ericsson Research Canada
-#
-# Author: Daniel Smith <daniel.smith@ericsson.com>
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-#
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
-# DEPRECATED AFTER ARNO
-
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
deleted file mode 100755
index 1c72dda52..000000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-# Ericsson Research Canada
-#
-# Author: Daniel Smith <daniel.smith@ericsson.com>
-#
-# Start up script for calling karaf / ODL inside a docker container.
-#
-# This script will also call a couple expect scripts to load the feature set that we want
-
-
-#ENV
-export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
-
-#MAIN
-echo "Starting up the da Sheilds..."
-/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
-echo "Sleeping 5 bad hack"
-sleep 10
-echo "should see stuff listening now"
-netstat -na
-echo " should see proess running for karaf"
-ps -efa
-echo " Starting the packages we want"
-/etc/init.d/speak.sh
-echo "Printout the status - if its right, you should see 8181 appear now"
-netstat -na
-ps -efa
-
-
-
-## This loop keeps the container running and prints the karaf status to the docker logs every minute
-## Cheap - but effective
-while true;
-do
- echo "Checking status of ODL:"
- /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
- sleep 60
-done
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
deleted file mode 100644
index c286127a4..000000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
+++ /dev/null
@@ -1,77 +0,0 @@
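-# Class: opnfv::odl_docker
-#
-# Stages the ODL docker image, the docker binary and the helper scripts
-# onto controller nodes; starting the container is handled by those scripts.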
-class opnfv::odl_docker
-{
- case $::fuel_settings['role'] {
- /controller/: {
-
- file { '/opt':
- ensure => 'directory',
- }
-
- file { '/opt/opnfv':
- ensure => 'directory',
- owner => 'root',
- group => 'root',
- mode => 777,
- }
-
- file { '/opt/opnfv/odl':
- ensure => 'directory',
- }
-
- file { '/opt/opnfv/odl/odl_docker_image.tar':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar',
- mode => 750,
- }
-
- file { '/opt/opnfv/odl/docker-latest':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/odl_docker/docker-latest',
- mode => 750,
- }
-
-      file { '/opt/opnfv/odl/start_odl_container.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
- mode => 750,
- }
- file { '/opt/opnfv/odl/stage_odl.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
- mode => 750,
- }
- file { '/opt/opnfv/odl/config_net_odl.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
- mode => 750,
- }
- file { '/opt/opnfv/odl/change.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/change.sh',
- mode => 750,
- }
-
-
-      # fix the "failed to find cgroup root" issue
- # https://github.com/docker/docker/issues/8791
- case $::operatingsystem {
- 'ubuntu': {
- package {'cgroup-lite':
- ensure => present,
- }
-
- service {'cgroup-lite':
- ensure => running,
- enable => true,
- require => Package['cgroup-lite'],
- }
- }
- 'centos': {
- package {'docker-io':
- ensure => latest,
- }
- }
- }
- }
- }
-}
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
deleted file mode 100644
index f7f3d6e78..000000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/bin/bash
-# script to remove bridges and reset networking for ODL
-
-
-#VARS
-MODE=0
-DNS=8.8.8.8
-
-#ENV
-source ~/openrc
-
-# GET IPS for that node
-function get_ips {
- BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
- BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
- BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
- BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
- DEF_NETMASK=255.255.255.0
- DEF_GW=172.30.9.1
-}
-
-function backup_ifcfg {
- echo " backing up "
- mkdir -p /etc/network/ifcfg_backup
- mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
- rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
- rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
- rm -rf /etc/network/interfaces.d/ifcfg-eth1
- rm -rf /etc/network/interfaces.d/ifcfg-eth0
-
-}
-
-
-function create_ifcfg_br_mgmt {
- echo "migrating br_mgmt"
- echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo " address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
-}
-
-function create_ifcfg_br_storage {
- echo "migration br_storage"
- echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo " address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
-}
-
-function create_ifcfg_br_fw_admin {
- echo " migratinng br_fw_admin"
- echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
- echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
- echo " address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
-}
-
-function create_ifcfg_eth0 {
- echo "migratinng br-ex to eth0 - temporarily"
- echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
- echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
-}
-
-function set_mode {
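- # heuristic: only controllers have a local glance image store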
- if [ -d "/var/lib/glance/images" ]
- then
- echo " controller "
- MODE=0
- else
- echo " compute "
- MODE=1
- fi
-}
-
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function start_ovs {
- echo "Starting OVS"
- service openvswitch-switch start
- ovs-vsctl show
-}
-
-
-function clean_ovs {
- echo "cleaning OVS DB"
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- echo "restarting OVS - you should see Nothing there"
- start_ovs
-}
-
-
-
-function reboot_me {
- reboot
-}
-
-function allow_challenge {
- sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
- service ssh restart
-}
-
-function clean_neutron {
- subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
-
- #display all elements
- echo "SUBNETS: ${subnets[@]} "
- echo "NETWORKS: ${networks[@]} "
- echo "PORTS: ${ports[@]} "
- echo "ROUTERS: ${routers[@]} "
-
-
- # get port and subnet for each router
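- # NOTE: these arrays are overwritten on each pass, so only the last router's ports/subnets survive - fine for single-router setups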
- for i in "${routers[@]}"
- do
- routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id | sed '/^$/d' `)
- routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed | sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//' -e 's/"$//' `)
- done
-
- echo "ROUTER PORTS: ${routerport[@]} "
- echo "ROUTER SUBNET: ${routersnet[@]} "
-
- #remove router subnets
- echo "router-interface-delete"
- for i in "${routersnet[@]}"
- do
- neutron router-interface-delete ${routers[0]} $i
- done
-
- #remove subnets
- echo "subnet-delete"
- for i in "${subnets[@]}"
- do
- neutron subnet-delete $i
- done
-
- #remove nets
- echo "net-delete"
- for i in "${networks[@]}"
- do
- neutron net-delete $i
- done
-
- #remove routers
- echo "router-delete"
- for i in "${routers[@]}"
- do
- neutron router-delete $i
- done
-
- #remove ports
- echo "port-delete"
- for i in "${ports[@]}"
- do
- neutron port-delete $i
- done
-
- #remove subnets
- echo "subnet-delete second pass"
- for i in "${subnets[@]}"
- do
- neutron subnet-delete $i
- done
-
-}
-
-function set_dns {
- sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
-}
-
-
-#OUTPUT
-
-function check {
- echo $BR_MGMT
- echo $BR_STORAGE
- echo $BR_FW_ADMIN
- echo $BR_EX
-}
-
-### MAIN
-
-
-set_mode
-backup_ifcfg
-get_ips
-create_ifcfg_br_mgmt
-create_ifcfg_br_storage
-create_ifcfg_br_fw_admin
-if [ $MODE == "0" ]
-then
- create_ifcfg_eth0
-fi
-allow_challenge
-clean_ovs
-check
-reboot_me
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
deleted file mode 100755
index 145da806b..000000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/bin/bash
-#
-# Author: Daniel Smith (Ericsson)
-#
-# Script to update neutron configuration for OVSDB/ODL integration
-#
-# Usage - Set / pass CONTROL_HOST to your needs
-#
-### SET THIS VALUE TO MATCH YOUR SYSTEM
-CONTROL_HOST=192.168.0.2
-BR_EX_IP=172.30.9.70
-
-# ENV
-source ~/openrc
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
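- # NOTE: the lines below append unconditionally, so re-running this script will add duplicate [ml2_odl] sections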
- echo "[ml2_odl]" >> $ML2_CONF
- echo "password = admin" >> $ML2_CONF
- echo "username = admin" >> $ML2_CONF
- echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-function disable_agent {
- echo "Disabling Neutron Plugin Agents from running"
- service neutron-plugin-openvswitch-agent stop
- echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if [ -d "/var/lib/glance/images" ]
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function start_ovs {
- echo "Starting OVS"
-  service openvswitch-switch start
- ovs-vsctl show
-}
-
-
-function control_setup {
- echo "Modifying Controller"
- stop_neutron
- stop_ovs
- disable_agent
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
- mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- rm -rf /etc/openvswitch/.conf*
- service openvswitch-switch start
- ovs-vsctl add-br br-ex
- ovs-vsctl add-port br-ex eth0
- ovs-vsctl set interface br-ex type=external
- ifconfig br-ex 172.30.9.70/24 up
- service neutron-server restart
-
- echo "setting up networks"
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
- ip link add link eth1 name br-storage type vlan id 301
- ip link add link eth1 name br-prv type vlan id 1000
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
-
- echo "Setting ODL Manager IP"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
-
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
-  # BAD HACK - should be parameterized - this restores the default route
- route add default gw 172.30.9.1
-
-}
-
-function clean_ovs {
- echo "cleaning OVS DB"
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- echo "restarting OVS - you should see Nothing there"
- start_ovs
-}
-
-function compute_setup {
- echo "Modifying Compute"
- echo "Disabling neutron openvswitch plugin"
- stop_neutron
- disable_agent
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
- ip link add link eth1 name br-storage type vlan id 301
- ip link add link eth1 name br-prv type vlan id 1000
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
-
- echo "set manager, and route for ODL controller"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
- route add 172.17.0.1 gw 192.168.0.2
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
deleted file mode 100755
index fa14b47d4..000000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-# Author: Daniel Smith (Ericsson)
-# Stages the ODL Controller
-# Inputs: odl_docker_image.tar
-# Usage: ./stage_odl.sh
-
-# ENVS
-source ~/.bashrc
-source ~/openrc
-
-LOCALPATH=/opt/opnfv/odl
-DOCKERBIN=docker-latest
-ODLIMGNAME=odl_docker_image.tar
-DNS=8.8.8.8
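-# derive this host's br-ex address (only used for the informational URL printed at the end)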
-HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
-
-
-
-# DEBUG ECHOS
-echo $LOCALPATH
-echo $DOCKERBIN
-echo $ODLIMGNAME
-echo $DNS
-echo $HOST_IP
-
-
-# Set DNS to something external and fix the default GW - ODL requires a connection to the internet
-sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf
-route delete default gw 10.20.0.2
-route add default gw 172.30.9.1
-
-# Start the Docker daemon in the background
-echo "Starting Docker"
-chmod +x $LOCALPATH/$DOCKERBIN
-$LOCALPATH/$DOCKERBIN -d &
-#courtesy sleep for virtual env
-sleep 2
-
-# Import the ODL Container
-echo "Importing ODL Container"
-$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME
-
-# Start ODL, load DLUX and OVSDB modules
-echo "Removing any old install found - file not found is ok here"
-$LOCALPATH/$DOCKERBIN rm odl_docker
-echo "Starting up ODL controller in Daemon mode - no shell possible"
-$LOCALPATH/$DOCKERBIN run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:36826 -i -d -t loving_daniel
-
-# Afterwards, you should see the container listed in docker ps and the ports open
-echo " you should reach the ODL controller at http://$HOST_IP:8181/dlux/index.html"
-$LOCALPATH/$DOCKERBIN ps -a
-netstat -lnt
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
deleted file mode 100755
index 347ac7488..000000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-# Ericsson Canada Inc.
-# Author: Daniel Smith
-#
-# A helper script to install and set up the ODL docker container on the controller
-#
-#
-# Inputs: odl_docker_image.tar
-#
-# Usage: ./start_odl_docker.sh
-echo "DEPRECATED - USE stage_odl.sh instead - this will be removed shortly once automated deployment is working - SR1"
-
-
-# ENVS
-source ~/.bashrc
-source ~/openrc
-
-# VARS
-
-# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source
-
-DEV=1
-
-# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host. If set to 0, random ports will be used - NOTE: this doesn't work for all web services (X port on host --> Y port in container),
-# especially for SSL/HTTPS cases. Be aware.
-
-MATCH_PORT=1
-
-LOCALPATH=/opt/opnfv/odl
-DOCKERBINNAME=docker-latest
-DOCKERIMAGENAME=odl_docker_image.tar
-DNS=8.8.8.8
-HOST_IP=`ifconfig br-fw-admin | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
-
-
-# Set this to "1" if you want to have your docker container start up into a shell
-
-
-ENABLE_SHELL=1
-
-
-echo " Fetching Docker "
-if [ "$DEV" -eq "1" ];
-# If testing locally (on a control node) you can set DEV=1 to enable an apt-get based install on the control node (not the desired target, but good for testing).
-then
- echo "Dev Mode - Fetching from Internet";
- echo " this wont work in production builds";
- apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
- mkdir -p $LOCALPATH
- wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME
- wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb
- chmod 777 $LOCALPATH/$DOCKERBINNAME
- echo "done ";
-else
- echo "Using Binaries delivered from Puppet"
- echo "Starting Docker in Daemon mode"
- chmod +x $LOCALPATH/$DOCKERBINNAME
- $LOCALPATH/$DOCKERBINNAME -d &
-
- # wait until docker will be fully initialized
- # before any further action against just started docker
- sleep 5
-fi
-
-
-# We need to perform some cleanup of the Openstack Environment
-echo "TODO -- This should be automated in the Fuel deployment at some point"
-echo "However, the timing should come after basic tests are running, since this "
-echo " part will remove the subnet router association that is deployed automativally"
-echo " via fuel. Refer to the ODL + Openstack Integration Page "
-
-# Import the ODL container into docker
-
-echo "Importing ODL container into docker"
-$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME
-
-echo " starting up ODL - DLUX and Mapping Ports"
-if [ "$MATCH_PORT" -eq "1" ]
-then
- echo "Starting up Docker..."
- $LOCALPATH/$DOCKERBINNAME rm odl_docker
-fi
-
-if [ "$ENABLE_SHELL" -eq "1" ];
-then
- echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
-        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:36826 -i -t loving_daniel /bin/bash
-else
- echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)"
-        $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:36826 -i -d -t loving_daniel
- echo "should see the process listed here in docker ps -a"
- $LOCALPATH/$DOCKERBINNAME ps -a;
- echo "Match Port enabled, you can reach the DLUX login at: "
- echo "http://$HOST_IP:8181/dlux.index.html"
-fi
diff --git a/fuel/build/f_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/scripts/config_net_odl.sh
deleted file mode 100644
index d292acd93..000000000
--- a/fuel/build/f_odl_docker/scripts/config_net_odl.sh
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/bin/bash
-#
-# Author: Daniel Smith (Ericsson)
-#
-# Script to update neutron configuration for OVSDB/ODL integration
-#
-# Usage - Set / pass CONTROL_HOST to your needs
-#
-CONTROL_HOST=172.30.9.70
-
-# ENV
-source ~/openrc
-
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- cat "[ml2_odl]" >> $ML2_CONF
- cat "password = admin" >> $ML2_CONF
- cat "username = admin" >> $ML2_CONF
- cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if ls -l /var/lib/glance/images
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function control_setup {
- echo "Modifying Controller"
- stop_neutron
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
- mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
- service openvswitch-switch start
- ovs-vsctl set-manager tcp:172.30.9.70:6640
- ovs-vsctl add-br br-eth0
- ovs-vsctl add-br br-ex
- ovs-vsctl add-port br-eth0 eth0
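-    # create a patch-port pair so traffic can flow between br-eth0 and br-ex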
- ovs-vsctl add-port br-eth0 br-eth0--br-ex
- ovs-vsctl add-port br-ex br-ex--br-eth0
- ovs-vsctl set interface br-ex--br-eth0 type=patch
- ovs-vsctl set interface br-eth0--br-ex type=patch
- ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
- ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
- ifconfig br-ex 172.30.9.70/24 up
- service neutron-server restart
-
- echo "setting up networks"
- ip link add link eth1 name br-mgmt type vlan id 300
- ip link add link eth1 name br-storage type vlan id 301
- /etc/init.d/networking restart
-
-
- echo "Reset Neutron DB"
- #reset_neutrondb
- echo "Restarting Neutron Components"
- #restart_neutron
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
-}
-
-function compute_setup {
- echo "do compute stuff here"
- echo "stopping neutron openvswitch plugin"
- stop_neutron
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
- ip link add link eth1 name br-storage type vlan id 301
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24
- echo "set manager, and route for ODL controller"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
- route add 172.17.0.1 gw 192.168.0.2
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh
deleted file mode 100644
index 3b688aee5..000000000
--- a/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-CONTROL_HOST=172.17.0.3
-
-# ENV
-source ~/openrc
-
-
-
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-
-
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- cat "[ml2_odl]" >> $ML2_CONF
- cat "password = admin" >> $ML2_CONF
- cat "username = admin" >> $ML2_CONF
- cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if df -k | grep glance
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function control_setup {
- echo "do control stuff here"
- echo "Reset Neutron DB"
- #reset_neutrondb
- echo "Restarting Neutron Components"
- #restart_neutron
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
-}
-
-function compute_setup {
- echo "do compute stuff here"
- stop_neutron
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-#update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh
deleted file mode 100755
index dd4fc9fc9..000000000
--- a/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-# a "cheat" way to install docker on the controller
-# can only be used if you have a connection out to the internet
-
-# Usage:  ./prep_nets_for_odl.sh <ip of default route to remove> <ip of default gw to add>
-
-OLDGW=$1
-NEWGW=$2
-IMAGEPATH=/opt/opnfv
-IMAGENAME=odl_docker_image.tar
-SOURCES=/etc/apt/sources.list
-
-
-if [ "$#" -ne 2]; then
- echo "Two args not provided, will not touch networking"
-else
-
- # Fix routes
- echo "Fixing routes"
- #DEBUG
- netstat -rn
-
- echo "delete old def route"
- route delete default gw $1
- echo "adding new def route"
- route add default gw $2
-
- echo " you should see a good nslookup now"
- nslookup www.google.ca
-fi
-
-
-if egrep "mirrors.txt" $SOURCES
-then
- echo "Sources was already updated, not touching"
-else
- echo "adding the closests mirrors and docker mirror to the mix"
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
- apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
- echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
-fi
-
-echo "Updating"
-apt-get update
-echo "Installing Docker"
-apt-get install -y lxc-docker
-
-echo "Loading ODL Docker Image"
-docker load -i $IMAGEPATH/$IMAGENAME
-
-
diff --git a/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh
deleted file mode 100644
index 42c9451bc..000000000
--- a/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-
-
-# ok .. so they created br-int
-#
-# so let's add a physical nic to it
-
-
-# First - Removal all the bridges you find
-
-for i in $(ovs-vsctl list-br)
-do
- if [ "$i" == "br-int" ];
- then
- echo "skipped br-int"
-        elif [ "$i" == "br-prv" ];
- then
- echo "skipped br-pr"
- else
- ovs-vsctl del-br $i
- fi
-done
diff --git a/fuel/build/f_opnfv_puppet/Makefile b/fuel/build/f_opnfv_puppet/Makefile
deleted file mode 100644
index 0949737bc..000000000
--- a/fuel/build/f_opnfv_puppet/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_opnfv_puppet/README b/fuel/build/f_opnfv_puppet/README
deleted file mode 100644
index 35bea5a00..000000000
--- a/fuel/build/f_opnfv_puppet/README
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This is the top level "OPNFV" Puppet class which (hopefully) will only be used to include
-an appropriate set of sub-classes, each of which is self-contained.
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp
deleted file mode 100644
index ccb39392f..000000000
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/add_packages.pp
+++ /dev/null
@@ -1,9 +0,0 @@
-# Class: opnfv::add_packages
-#
-# Ensure added packages are installed:
-#
-
-class opnfv::add_packages {
- if $::osfamily == 'Debian' {
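-    # intentionally empty for now - this class appears to be a hook where per-distro package resources would be added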
- }
-}
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
deleted file mode 100644
index 54f1c86bf..000000000
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# == Class: opnfv
-#
-# This class is used to perform OPNFV inclusions and settings on top of
-# the vanilla Fuel installation.
-#
-# Currently all logic is self contained, i.e. it is sufficient to
-# "include opnfv" from site.pp.
-
-class opnfv {
- # Configure resolv.conf if parameters passed through astute
- include opnfv::resolver
- # Setup OPNFV style NTP config
- include opnfv::ntp
- # Make sure all added packages are installed
- include opnfv::add_packages
- # Setup OpenDaylight
- include opnfv::odl_docker
-}
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp
deleted file mode 100644
index 0822f0233..000000000
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/opncheck.pp
+++ /dev/null
@@ -1,21 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Class: opnfv::opncheck
-#
-# Make sure that /opt/opnfv/pre-deploy.sh has been run by
-# verifying there is an "opnfv:" level in the astute.yaml.
-
-class opnfv::opncheck()
-{
- unless $::fuel_settings['opnfv'] {
- fail("Error: You have not run /opt/opnfv/pre-deploy.sh on the Fuel master prior to deploying!")
- }
-}
diff --git a/fuel/build/f_osnaily/Makefile b/fuel/build/f_osnaily/Makefile
deleted file mode 100644
index 0949737bc..000000000
--- a/fuel/build/f_osnaily/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp b/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp
deleted file mode 100644
index 05cd9e02a..000000000
--- a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp
+++ /dev/null
@@ -1,366 +0,0 @@
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-$openstack_version = {
- 'keystone' => 'installed',
- 'glance' => 'installed',
- 'horizon' => 'installed',
- 'nova' => 'installed',
- 'novncproxy' => 'installed',
- 'cinder' => 'installed',
-}
-
-tag("${::fuel_settings['deployment_id']}::${::fuel_settings['environment']}")
-
-#Stages configuration
-stage {'zero': } ->
-stage {'opncheck': } ->
-stage {'first': } ->
-stage {'openstack-custom-repo': } ->
-stage {'netconfig': } ->
-stage {'corosync_setup': } ->
-stage {'openstack-firewall': } -> Stage['main']
-
-class begin_deployment ()
-{
- $role = $::fuel_settings['role']
- notify { "***** Beginning deployment of node ${::hostname} with role $role *****": }
-}
-
-class {'begin_deployment': stage => 'zero' }
-
-stage {'glance-image':
- require => Stage['main'],
-}
-
-if $::fuel_settings['nodes'] {
- $nodes_hash = $::fuel_settings['nodes']
-# OPNFV addition to add to hosts file
- if ($::fuel_settings['opnfv'] and
- $::fuel_settings['opnfv']['hosts']) {
- $extras_hash = $::fuel_settings['opnfv']['hosts']
- } else {
- $extras_hash = undef
- }
-
- $dns_nameservers=$::fuel_settings['dns_nameservers']
- $node = filter_nodes($nodes_hash,'name',$::hostname)
- if empty($node) {
- fail("Node $::hostname is not defined in the hash structure")
- }
-
- $default_gateway = $node[0]['default_gateway']
-
- $base_syslog_hash = $::fuel_settings['base_syslog']
- $syslog_hash = $::fuel_settings['syslog']
-
- $disable_offload = $::fuel_settings['disable_offload']
- if $disable_offload {
- L23network::L3::Ifconfig<||> {
- ethtool => {
- 'K' => ['gso off', 'gro off'],
- }
- }
- }
-
- $use_neutron = $::fuel_settings['quantum']
-
- if (!empty(filter_nodes($::fuel_settings['nodes'], 'role', 'ceph-osd')) or
- $::fuel_settings['storage']['volumes_ceph'] or
- $::fuel_settings['storage']['images_ceph'] or
- $::fuel_settings['storage']['objects_ceph']
- ) {
- $use_ceph = true
- } else {
- $use_ceph = false
- }
-
-
- if $use_neutron {
- prepare_network_config($::fuel_settings['network_scheme'])
- #
- $internal_int = get_network_role_property('management', 'interface')
- $internal_address = get_network_role_property('management', 'ipaddr')
- $internal_netmask = get_network_role_property('management', 'netmask')
- #
- $public_int = get_network_role_property('ex', 'interface')
- if $public_int {
- $public_address = get_network_role_property('ex', 'ipaddr')
- $public_netmask = get_network_role_property('ex', 'netmask')
-
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- L23network::L3::Ifconfig<| title == $public_int |> {
- default_gateway => true
- }
- } else {
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- $fw_admin_int = get_network_role_property('fw-admin', 'interface')
- L23network::L3::Ifconfig<| title == $fw_admin_int |> {
- default_gateway => true
- }
- }
- #
- $storage_address = get_network_role_property('storage', 'ipaddr')
- $storage_netmask = get_network_role_property('storage', 'netmask')
- } else {
- $internal_address = $node[0]['internal_address']
- $internal_netmask = $node[0]['internal_netmask']
- $public_address = $node[0]['public_address']
- $public_netmask = $node[0]['public_netmask']
- $storage_address = $node[0]['storage_address']
- $storage_netmask = $node[0]['storage_netmask']
- $public_br = $node[0]['public_br']
- $internal_br = $node[0]['internal_br']
- $public_int = $::fuel_settings['public_interface']
- $internal_int = $::fuel_settings['management_interface']
-
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- L23network::L3::Ifconfig<| title == $public_int |> {
- default_gateway => true
- }
-
- }
-}
-
-if ($::fuel_settings['neutron_mellanox']) {
- $mellanox_mode = $::fuel_settings['neutron_mellanox']['plugin']
-} else {
- $mellanox_mode = 'disabled'
-}
-
-# This parameter specifies the verbosity level of log messages
-# in openstack components config.
-# Debug sets the DEBUG level and ignores verbose settings, if any.
-# Verbose sets INFO level messages.
-# If neither debug nor verbose is set, the default WARNING level is used.
-$verbose = true
-$debug = $::fuel_settings['debug']
-
-### Storage Settings ###
-# Determine if any ceph parts have been asked for.
-# This will ensure that monitors are set up on controllers, even if no
-# ceph-osd roles during deployment
-
-
-### Syslog ###
-#TODO(bogdando) move logging options to astute.yaml
-# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
-$use_syslog = $::fuel_settings['use_syslog'] ? { default=>true }
-# Syslog facilities for main openstack services
-# should vary (reserved usage)
-# local1 is reserved for openstack-dashboard
-$syslog_log_facility_glance = 'LOG_LOCAL2'
-$syslog_log_facility_cinder = 'LOG_LOCAL3'
-$syslog_log_facility_neutron = 'LOG_LOCAL4'
-$syslog_log_facility_nova = 'LOG_LOCAL6'
-$syslog_log_facility_keystone = 'LOG_LOCAL7'
-# could be the same
-# local0 is free for use
-$syslog_log_facility_murano = 'LOG_LOCAL0'
-$syslog_log_facility_heat = 'LOG_LOCAL0'
-$syslog_log_facility_sahara = 'LOG_LOCAL0'
-$syslog_log_facility_ceilometer = 'LOG_LOCAL0'
-$syslog_log_facility_ceph = 'LOG_LOCAL0'
-
-### Monit ###
-# Monit for compute nodes.
-# If enabled, will install monit and configure its watchdogs to track
-# nova-compute/api/network (and openvswitch service, if neutron enabled)
-# at compute nodes.
-# TODO(bogdando) set to true once monit package shipped with Fuel ISO
-$use_monit = false
-
-$nova_rate_limits = {
- 'POST' => 100000,
- 'POST_SERVERS' => 100000,
- 'PUT' => 1000, 'GET' => 100000,
- 'DELETE' => 100000
-}
-$cinder_rate_limits = {
- 'POST' => 100000,
- 'POST_SERVERS' => 100000,
- 'PUT' => 100000, 'GET' => 100000,
- 'DELETE' => 100000
-}
-
-###
-class advanced_node_netconfig {
- $sdn = generate_network_config()
- notify {"SDN: ${sdn}": }
-}
-
-case $::operatingsystem {
- 'redhat' : {
- $queue_provider = 'qpid'
- $custom_mysql_setup_class = 'pacemaker_mysql'
- }
- default: {
- $queue_provider='rabbitmq'
- $custom_mysql_setup_class='galera'
- }
-}
-
-class os_common {
- # OPNFV check if pre_deploy.sh has been run, otherwise fail
- class {'opnfv::opncheck': stage => 'opncheck' }
- if ($::fuel_settings['neutron_mellanox']) {
- if ($::mellanox_mode != 'disabled') {
- class { 'mellanox_openstack::ofed_recompile' :
- stage => 'zero',
- }
- }
- if ($::fuel_settings['storage']['iser']) {
- class { 'mellanox_openstack::iser_rename':
- stage => 'zero',
- storage_parent => $::fuel_settings['neutron_mellanox']['storage_parent'],
- iser_interface_name => $::fuel_settings['neutron_mellanox']['iser_interface_name'],
- }
- Class['mellanox_openstack::ofed_recompile'] -> Class['mellanox_openstack::iser_rename']
- }
- }
- class {"l23network::hosts_file": stage => 'netconfig', nodes => $nodes_hash, extras => $extras_hash }
- class {'l23network': use_ovs=>$use_neutron, stage=> 'netconfig'}
- if $use_neutron {
- class {'advanced_node_netconfig': stage => 'netconfig' }
- } else {
- class {'osnailyfacter::network_setup': stage => 'netconfig'}
- }
-
- if ($::osfamily == 'RedHat') {
- package {'irqbalance': ensure => present} -> service {'irqbalance': ensure => running }
- }
-
- class { 'openstack::firewall':
- stage => 'openstack-firewall',
- nova_vnc_ip_range => $::fuel_settings['management_network_range'],
- }
-
- $base_syslog_rserver = {
- 'remote_type' => 'tcp',
- 'server' => $base_syslog_hash['syslog_server'],
- 'port' => $base_syslog_hash['syslog_port']
- }
-
- # setting kernel reserved ports
- # defaults are 49000,35357,41055,58882
- class { 'openstack::reserved_ports':
- stage => 'netconfig',
- }
-
- # setting service down time and report interval
- # to 60 and 180 for Nova respectively to allow kernel
- # to kill dead connections
- # (see zendesk #1158 as well)
- $nova_report_interval = '60'
- $nova_service_down_time = '180'
-
- $syslog_rserver = {
- 'remote_type' => $syslog_hash['syslog_transport'],
- 'server' => $syslog_hash['syslog_server'],
- 'port' => $syslog_hash['syslog_port'],
- }
- if $syslog_hash['syslog_server'] != "" and $syslog_hash['syslog_port'] != "" and $syslog_hash['syslog_transport'] != "" {
- $rservers = [$base_syslog_rserver, $syslog_rserver]
- } else {
- $rservers = [$base_syslog_rserver]
- }
-
- if $use_syslog {
- class { "::openstack::logging":
- stage => 'first',
- role => 'client',
- show_timezone => true,
- # log both locally include auth, and remote
- log_remote => true,
- log_local => true,
- log_auth_local => true,
-      # keep four weekly log rotations, force rotate if 300M size is exceeded
- rotation => 'weekly',
- keep => '4',
- # should be > 30M
- limitsize => '300M',
- # remote servers to send logs to
- rservers => $rservers,
- # should be true, if client is running at virtual node
- virtual => str2bool($::is_virtual),
- # Rabbit doesn't support syslog directly
- rabbit_log_level => 'NOTICE',
- debug => $debug,
- }
- }
-
- class { 'osnailyfacter::atop':
- stage => 'first',
- }
-
- class { 'osnailyfacter::ssh': }
-
- #case $role {
- # /controller/: { $hostgroup = 'controller' }
- # /swift-proxy/: { $hostgroup = 'swift-proxy' }
- # /storage/:{ $hostgroup = 'swift-storage' }
- # /compute/: { $hostgroup = 'compute' }
- # /cinder/: { $hostgroup = 'cinder' }
- # default: { $hostgroup = 'generic' }
- #}
-
- # if $nagios != 'false' {
- # class {'nagios':
- # proj_name => $proj_name,
- # services => [
- # 'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
- # 'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
- # 'glance-registry','horizon', 'rabbitmq', 'mysql',
- # ],
- # whitelist => ['127.0.0.1', $nagios_master],
- # hostgroup => $hostgroup ,
- # }
- # }
-
- # Workaround for fuel bug with firewall
- firewall {'003 remote rabbitmq ':
- sport => [ 4369, 5672, 15672, 41055, 55672, 61613 ],
- source => $::fuel_settings['master_ip'],
- proto => 'tcp',
- action => 'accept',
- require => Class['openstack::firewall'],
- }
-
- firewall {'004 remote puppet ':
- sport => [ 8140 ],
-    source => $::fuel_settings['master_ip'],
- proto => 'tcp',
- action => 'accept',
- require => Class['openstack::firewall'],
- }
-
- class { 'puppet::pull' :
- modules_source => $::fuel_settings['puppet_modules_source'],
- manifests_source => $::fuel_settings['puppet_manifests_source'],
- }
-} # OS_COMMON ENDS
-
-
-
-node default {
- case $::fuel_settings['deployment_mode'] {
- "singlenode": {
- include "osnailyfacter::cluster_simple"
- class {'os_common':}
- class {'opnfv':}
- }
- "multinode": {
- include "osnailyfacter::cluster_simple"
- class {'os_common':}
- class {'opnfv':}
- }
- /^(ha|ha_compact)$/: {
- include "osnailyfacter::cluster_ha"
- class {'os_common':}
- class {'opnfv':}
- }
- "rpmcache": { include osnailyfacter::rpmcache }
- }
-}
diff --git a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig b/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig
deleted file mode 100644
index 9ed557af3..000000000
--- a/fuel/build/f_osnaily/puppet/modules/osnailyfacter/examples/site.pp.orig
+++ /dev/null
@@ -1,353 +0,0 @@
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-$openstack_version = {
- 'keystone' => 'installed',
- 'glance' => 'installed',
- 'horizon' => 'installed',
- 'nova' => 'installed',
- 'novncproxy' => 'installed',
- 'cinder' => 'installed',
-}
-
-tag("${::fuel_settings['deployment_id']}::${::fuel_settings['environment']}")
-
-#Stages configuration
-stage {'zero': } ->
-stage {'first': } ->
-stage {'openstack-custom-repo': } ->
-stage {'netconfig': } ->
-stage {'corosync_setup': } ->
-stage {'openstack-firewall': } -> Stage['main']
-
-class begin_deployment ()
-{
- $role = $::fuel_settings['role']
- notify { "***** Beginning deployment of node ${::hostname} with role $role *****": }
-}
-
-class {'begin_deployment': stage => 'zero' }
-
-stage {'glance-image':
- require => Stage['main'],
-}
-
-if $::fuel_settings['nodes'] {
- $nodes_hash = $::fuel_settings['nodes']
- $dns_nameservers=$::fuel_settings['dns_nameservers']
- $node = filter_nodes($nodes_hash,'name',$::hostname)
- if empty($node) {
- fail("Node $::hostname is not defined in the hash structure")
- }
-
- $default_gateway = $node[0]['default_gateway']
-
- $base_syslog_hash = $::fuel_settings['base_syslog']
- $syslog_hash = $::fuel_settings['syslog']
-
- $disable_offload = $::fuel_settings['disable_offload']
- if $disable_offload {
- L23network::L3::Ifconfig<||> {
- ethtool => {
- 'K' => ['gso off', 'gro off'],
- }
- }
- }
-
- $use_neutron = $::fuel_settings['quantum']
-
- if (!empty(filter_nodes($::fuel_settings['nodes'], 'role', 'ceph-osd')) or
- $::fuel_settings['storage']['volumes_ceph'] or
- $::fuel_settings['storage']['images_ceph'] or
- $::fuel_settings['storage']['objects_ceph']
- ) {
- $use_ceph = true
- } else {
- $use_ceph = false
- }
-
-
- if $use_neutron {
- prepare_network_config($::fuel_settings['network_scheme'])
- #
- $internal_int = get_network_role_property('management', 'interface')
- $internal_address = get_network_role_property('management', 'ipaddr')
- $internal_netmask = get_network_role_property('management', 'netmask')
- #
- $public_int = get_network_role_property('ex', 'interface')
- if $public_int {
- $public_address = get_network_role_property('ex', 'ipaddr')
- $public_netmask = get_network_role_property('ex', 'netmask')
-
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- L23network::L3::Ifconfig<| title == $public_int |> {
- default_gateway => true
- }
- } else {
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- $fw_admin_int = get_network_role_property('fw-admin', 'interface')
- L23network::L3::Ifconfig<| title == $fw_admin_int |> {
- default_gateway => true
- }
- }
- #
- $storage_address = get_network_role_property('storage', 'ipaddr')
- $storage_netmask = get_network_role_property('storage', 'netmask')
- } else {
- $internal_address = $node[0]['internal_address']
- $internal_netmask = $node[0]['internal_netmask']
- $public_address = $node[0]['public_address']
- $public_netmask = $node[0]['public_netmask']
- $storage_address = $node[0]['storage_address']
- $storage_netmask = $node[0]['storage_netmask']
- $public_br = $node[0]['public_br']
- $internal_br = $node[0]['internal_br']
- $public_int = $::fuel_settings['public_interface']
- $internal_int = $::fuel_settings['management_interface']
-
- # TODO(Xarses): remove this after completing merge of
- # multiple-cluster-networks
- L23network::L3::Ifconfig<| title == $public_int |> {
- default_gateway => true
- }
-
- }
-}
-
-if ($::fuel_settings['neutron_mellanox']) {
- $mellanox_mode = $::fuel_settings['neutron_mellanox']['plugin']
-} else {
- $mellanox_mode = 'disabled'
-}
-
-# This parameter specifies the verbosity level of log messages
-# in openstack components config.
-# Debug would have set DEBUG level and ignore verbose settings, if any.
-# Verbose would have set INFO level messages
-# In case of non debug and non verbose - WARNING, default level would have set.
-$verbose = true
-$debug = $::fuel_settings['debug']
-
-### Storage Settings ###
-# Determine if any ceph parts have been asked for.
-# This will ensure that monitors are set up on controllers, even if no
-# ceph-osd roles during deployment
-
-
-### Syslog ###
-#TODO(bogdando) move logging options to astute.yaml
-# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
-$use_syslog = $::fuel_settings['use_syslog'] ? { default=>true }
-# Syslog facilities for main openstack services
-# should vary (reserved usage)
-# local1 is reserved for openstack-dashboard
-$syslog_log_facility_glance = 'LOG_LOCAL2'
-$syslog_log_facility_cinder = 'LOG_LOCAL3'
-$syslog_log_facility_neutron = 'LOG_LOCAL4'
-$syslog_log_facility_nova = 'LOG_LOCAL6'
-$syslog_log_facility_keystone = 'LOG_LOCAL7'
-# could be the same
-# local0 is free for use
-$syslog_log_facility_murano = 'LOG_LOCAL0'
-$syslog_log_facility_heat = 'LOG_LOCAL0'
-$syslog_log_facility_sahara = 'LOG_LOCAL0'
-$syslog_log_facility_ceilometer = 'LOG_LOCAL0'
-$syslog_log_facility_ceph = 'LOG_LOCAL0'
-
-### Monit ###
-# Monit for compute nodes.
-# If enabled, will install monit and configure its watchdogs to track
-# nova-compute/api/network (and openvswitch service, if neutron enabled)
-# at compute nodes.
-# TODO(bogdando) set to true once monit package shipped with Fuel ISO
-$use_monit = false
-
-$nova_rate_limits = {
- 'POST' => 100000,
- 'POST_SERVERS' => 100000,
- 'PUT' => 1000, 'GET' => 100000,
- 'DELETE' => 100000
-}
-$cinder_rate_limits = {
- 'POST' => 100000,
- 'POST_SERVERS' => 100000,
- 'PUT' => 100000, 'GET' => 100000,
- 'DELETE' => 100000
-}
-
-###
-class advanced_node_netconfig {
- $sdn = generate_network_config()
- notify {"SDN: ${sdn}": }
-}
-
-case $::operatingsystem {
- 'redhat' : {
- $queue_provider = 'qpid'
- $custom_mysql_setup_class = 'pacemaker_mysql'
- }
- default: {
- $queue_provider='rabbitmq'
- $custom_mysql_setup_class='galera'
- }
-}
-
-class os_common {
- if ($::fuel_settings['neutron_mellanox']) {
- if ($::mellanox_mode != 'disabled') {
- class { 'mellanox_openstack::ofed_recompile' :
- stage => 'zero',
- }
- }
- if ($::fuel_settings['storage']['iser']) {
- class { 'mellanox_openstack::iser_rename':
- stage => 'zero',
- storage_parent => $::fuel_settings['neutron_mellanox']['storage_parent'],
- iser_interface_name => $::fuel_settings['neutron_mellanox']['iser_interface_name'],
- }
- Class['mellanox_openstack::ofed_recompile'] -> Class['mellanox_openstack::iser_rename']
- }
- }
-
- class {"l23network::hosts_file": stage => 'netconfig', nodes => $nodes_hash }
- class {'l23network': use_ovs=>$use_neutron, stage=> 'netconfig'}
- if $use_neutron {
- class {'advanced_node_netconfig': stage => 'netconfig' }
- } else {
- class {'osnailyfacter::network_setup': stage => 'netconfig'}
- }
-
- if ($::osfamily == 'RedHat') {
- package {'irqbalance': ensure => present} -> service {'irqbalance': ensure => running }
- }
-
- class { 'openstack::firewall':
- stage => 'openstack-firewall',
- nova_vnc_ip_range => $::fuel_settings['management_network_range'],
- }
-
- $base_syslog_rserver = {
- 'remote_type' => 'tcp',
- 'server' => $base_syslog_hash['syslog_server'],
- 'port' => $base_syslog_hash['syslog_port']
- }
-
- # setting kernel reserved ports
- # defaults are 49000,35357,41055,58882
- class { 'openstack::reserved_ports':
- stage => 'netconfig',
- }
-
- # setting service down time and report interval
- # to 60 and 180 for Nova respectively to allow kernel
- # to kill dead connections
- # (see zendesk #1158 as well)
- $nova_report_interval = '60'
- $nova_service_down_time = '180'
-
- $syslog_rserver = {
- 'remote_type' => $syslog_hash['syslog_transport'],
- 'server' => $syslog_hash['syslog_server'],
- 'port' => $syslog_hash['syslog_port'],
- }
- if $syslog_hash['syslog_server'] != "" and $syslog_hash['syslog_port'] != "" and $syslog_hash['syslog_transport'] != "" {
- $rservers = [$base_syslog_rserver, $syslog_rserver]
- } else {
- $rservers = [$base_syslog_rserver]
- }
-
- if $use_syslog {
- class { "::openstack::logging":
- stage => 'first',
- role => 'client',
- show_timezone => true,
- # log both locally include auth, and remote
- log_remote => true,
- log_local => true,
- log_auth_local => true,
-      # keep four weekly log rotations, force rotation if the 300M size limit is exceeded
- rotation => 'weekly',
- keep => '4',
- # should be > 30M
- limitsize => '300M',
- # remote servers to send logs to
- rservers => $rservers,
-      # should be true if the client is running on a virtual node
- virtual => str2bool($::is_virtual),
- # Rabbit doesn't support syslog directly
- rabbit_log_level => 'NOTICE',
- debug => $debug,
- }
- }
-
- class { 'osnailyfacter::atop':
- stage => 'first',
- }
-
- class { 'osnailyfacter::ssh': }
-
- #case $role {
- # /controller/: { $hostgroup = 'controller' }
- # /swift-proxy/: { $hostgroup = 'swift-proxy' }
- # /storage/:{ $hostgroup = 'swift-storage' }
- # /compute/: { $hostgroup = 'compute' }
- # /cinder/: { $hostgroup = 'cinder' }
- # default: { $hostgroup = 'generic' }
- #}
-
- # if $nagios != 'false' {
- # class {'nagios':
- # proj_name => $proj_name,
- # services => [
- # 'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
- # 'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
- # 'glance-registry','horizon', 'rabbitmq', 'mysql',
- # ],
- # whitelist => ['127.0.0.1', $nagios_master],
- # hostgroup => $hostgroup ,
- # }
- # }
-
- # Workaround for fuel bug with firewall
- firewall {'003 remote rabbitmq ':
- sport => [ 4369, 5672, 15672, 41055, 55672, 61613 ],
- source => $::fuel_settings['master_ip'],
- proto => 'tcp',
- action => 'accept',
- require => Class['openstack::firewall'],
- }
-
- firewall {'004 remote puppet ':
- sport => [ 8140 ],
- source => $master_ip,
- proto => 'tcp',
- action => 'accept',
- require => Class['openstack::firewall'],
- }
-
- class { 'puppet::pull' :
- modules_source => $::fuel_settings['puppet_modules_source'],
- manifests_source => $::fuel_settings['puppet_manifests_source'],
- }
-} # OS_COMMON ENDS
-
-
-
-node default {
- case $::fuel_settings['deployment_mode'] {
- "singlenode": {
- include "osnailyfacter::cluster_simple"
- class {'os_common':}
- }
- "multinode": {
- include "osnailyfacter::cluster_simple"
- class {'os_common':}
- }
- /^(ha|ha_compact)$/: {
- include "osnailyfacter::cluster_ha"
- class {'os_common':}
- }
- "rpmcache": { include osnailyfacter::rpmcache }
- }
-}
diff --git a/fuel/build/f_resolvconf/Makefile b/fuel/build/f_resolvconf/Makefile
deleted file mode 100644
index 0949737bc..000000000
--- a/fuel/build/f_resolvconf/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_resolvconf/README b/fuel/build/f_resolvconf/README
deleted file mode 100644
index 5ff570f0a..000000000
--- a/fuel/build/f_resolvconf/README
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This addition generates resolv.conf separately for compute hosts and
-controller hosts through Astute.
-
-The astute.yaml file should contain entries such as those below to have them
-picked up during deployment:
-
-opnfv:
- dns:
- compute:
- - 100.100.100.2
- - 100.100.100.3
- controller:
- - 100.100.100.102
- - 100.100.100.104
-
-The suggested method for adding this information is to prepare for deployment
-with the Fuel GUI or CLI, but before actually deploying:
-
-1. Download the current deployment for all hosts: fuel --env 1 deployment --default
-2. Iterate through the hosts in "deployment_1" and add hosts configuration in
- the above format to their respective yaml file.
-3. Upload the modifed deployment information: fuel --env 1 deployment --upload
-
-After deploying, the additions will be included in /etc/astute.yaml of each
-host.
-
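The edit-and-upload cycle above lends itself to scripting. A minimal sketch, assuming
environment id 1, that every node should receive the same entries, and that no opnfv
key already exists in the files (all addresses illustrative):

  #!/bin/bash
  # Fetch the default deployment information for environment 1
  fuel --env 1 deployment --default
  # Naively append the opnfv dns section to each node's yaml file
  for f in deployment_1/*.yaml; do
      printf '%s\n' 'opnfv:' '  dns:' '    compute:' \
          '      - 100.100.100.2' '      - 100.100.100.3' \
          '    controller:' '      - 100.100.100.102' \
          '      - 100.100.100.104' >> "$f"
  done
  # Upload the modified deployment information
  fuel --env 1 deployment --upload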
diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
deleted file mode 100644
index 44f36a237..000000000
--- a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp
+++ /dev/null
@@ -1,73 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Class: opnfv::resolver
-#
-# Add resolver content passed through astute.yaml into resolv.conf
-# depending on the role
-#
-# Suitable yaml content:
-# <begin>
-# opnfv:
-# dns:
-# compute:
-# - 100.100.100.2
-# - 100.100.100.3
-# controller:
-# - 100.100.100.102
-# - 100.100.100.104
-# <end>
-#
-#
-#
-
-class opnfv::resolver()
-{
- if $::fuel_settings['role'] {
- if $::fuel_settings['role'] == 'primary-controller' {
- $role = 'controller'
- } else {
- $role = $::fuel_settings['role']
- }
-
- if ($::fuel_settings['opnfv']
- and $::fuel_settings['opnfv']['dns']
- and $::fuel_settings['opnfv']['dns'][$role]) {
- $nameservers=$::fuel_settings['opnfv']['dns'][$role]
-
- file { '/etc/resolv.conf':
- owner => root,
- group => root,
- mode => '0644',
- content => template('opnfv/resolv.conf.erb'),
- }
-
- # /etc/resolv.conf is re-generated at each boot by resolvconf, so we
- # need to store there as well.
-
- case $::operatingsystem {
- 'ubuntu': {
- file { '/etc/resolvconf/resolv.conf.d/head':
- owner => root,
- group => root,
- mode => '0644',
- content => template('opnfv/resolv.conf.erb'),
- }
- }
- 'centos': {
- exec { 'for file in ifcfg-eth*; do grep -q -F "PEERDNS=" $file || echo "PEERDNS=no" >> $file; done ':
- provider => 'shell',
- cwd => '/etc/sysconfig/network-scripts',
- }
- }
- }
- }
- }
-}
diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb b/fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb
deleted file mode 100644
index 7a29dcaf7..000000000
--- a/fuel/build/f_resolvconf/puppet/modules/opnfv/templates/resolv.conf.erb
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8)
-# DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
-# Modified by OPNFV.
-<% @nameservers.each do |ns| %>nameserver <%= ns %>
-<% end -%>
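With the controller entries from the astute.yaml example above, this template renders
to (addresses illustrative):

  # Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8)
  # DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN
  # Modified by OPNFV.
  nameserver 100.100.100.102
  nameserver 100.100.100.104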
diff --git a/fuel/build/f_resolvconf/testing/README b/fuel/build/f_resolvconf/testing/README
deleted file mode 100644
index 6846a8dc9..000000000
--- a/fuel/build/f_resolvconf/testing/README
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-In order to test the functionality without performing a full deployment,
-run "puppet apply" on fake_init.pp, which will call only the
-opnfv::resolvconf class, as in the sketch below.
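A minimal sketch of such a test run, assuming the opnfv module has been copied into
the standard module path and that astute.yaml is readable (paths illustrative):

  # Expose the astute settings as a fact, then apply the test manifest
  FACTER_astute_settings_yaml="$(cat /etc/astute.yaml)" \
      puppet apply --modulepath=/etc/puppet/modules testing/fake_init.pp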
diff --git a/fuel/build/f_resolvconf/testing/fake_init.pp b/fuel/build/f_resolvconf/testing/fake_init.pp
deleted file mode 100644
index 496dcd216..000000000
--- a/fuel/build/f_resolvconf/testing/fake_init.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-include opnfv::resolvconf
diff --git a/fuel/build/fuel-agent_1.patch b/fuel/build/fuel-agent_1.patch
new file mode 100644
index 000000000..b0808966f
--- /dev/null
+++ b/fuel/build/fuel-agent_1.patch
@@ -0,0 +1,36 @@
+*** build/repos/nailgun/fuel_agent/fuel_agent/manager.py.orig Thu Sep 24 11:08:38 2015
+--- build/repos/nailgun/fuel_agent/fuel_agent/manager.py Thu Sep 24 11:10:25 2015
+***************
+*** 541,546 ****
+--- 541,552 ----
+ fs_options=fs.options,
+ fs_label=fs.label,
+ dev=str(fs.device))
++ if fs.type == 'ext4':
++ LOG.debug('Trying to disable journaling for ext4 '
++ 'in order to speed up the build')
++ utils.execute('tune2fs', '-O', '^has_journal',
++ str(fs.device))
++
+
+ # mounting all images into chroot tree
+ self.mount_target(chroot, treat_mtab=False, pseudo=False)
+***************
+*** 631,636 ****
+--- 637,652 ----
+ self.umount_target(chroot, pseudo=False, try_lazy_umount=False)
+
+ for image in self.driver.image_scheme.images:
++ # find fs with the same loop device object
++ # as image.target_device
++ fs = self.driver.partition_scheme.fs_by_device(
++ image.target_device)
++
++ if fs.type == 'ext4':
++ LOG.debug('Trying to re-enable journaling for ext4')
++ utils.execute('tune2fs', '-O', 'has_journal',
++ str(fs.device))
++
+ LOG.debug('Deattaching loop device from file: %s',
+ image.img_tmp_file)
+ bu.deattach_loop(str(image.target_device))
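The two added hunks toggle the ext4 journal around image population: journaling is
pure overhead while a throwaway image is being filled, so it is dropped before the
bulk writes and restored once the image is complete. The same toggle can be
reproduced by hand with tune2fs on an unmounted filesystem (device path illustrative):

  # Disable the ext4 journal before bulk-writing the image
  tune2fs -O ^has_journal /dev/loop0
  # ... populate the filesystem ...
  # Re-enable the journal once writes are complete
  tune2fs -O has_journal /dev/loop0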
diff --git a/fuel/build/fuel-main_1.patch b/fuel/build/fuel-main_1.patch
deleted file mode 100644
index 24b25b2c7..000000000
--- a/fuel/build/fuel-main_1.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-diff --git a/docker/astute/Dockerfile b/docker/astute/Dockerfile
-index 55f617a..cd05f19 100644
---- a/docker/astute/Dockerfile
-+++ b/docker/astute/Dockerfile
-@@ -5,7 +5,7 @@
- FROM fuel/centos
- MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y ruby21-nailgun-mcagents sysstat
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y ruby21-nailgun-mcagents sysstat
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/cobbler/Dockerfile b/docker/cobbler/Dockerfile
-index 0c80abd..3a3d966 100644
---- a/docker/cobbler/Dockerfile
-+++ b/docker/cobbler/Dockerfile
-@@ -5,7 +5,7 @@
- FROM fuel/centos
- MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y httpd cobbler dnsmasq xinetd tftp-server; ln -s /etc/dnsmasq.conf /etc/cobbler.dnsmasq.conf
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y httpd cobbler dnsmasq xinetd tftp-server; ln -s /etc/dnsmasq.conf /etc/cobbler.dnsmasq.conf
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/mcollective/Dockerfile b/docker/mcollective/Dockerfile
-index e70e87d..d6554b7 100644
---- a/docker/mcollective/Dockerfile
-+++ b/docker/mcollective/Dockerfile
-@@ -4,7 +4,7 @@ MAINTAINER Aleksandr Didenko adidenko@mirantis.com
-
- WORKDIR /root
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y sudo ruby21-mcollective
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y sudo ruby21-mcollective
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/ostf/Dockerfile b/docker/ostf/Dockerfile
-index 43f911e..8da9108 100644
---- a/docker/ostf/Dockerfile
-+++ b/docker/ostf/Dockerfile
-@@ -5,7 +5,7 @@
- FROM fuel/centos
- MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all; yum --quiet install -y python-fuelclient supervisor postgresql-libs
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*;echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all; yum --quiet install -y python-fuelclient supervisor postgresql-libs
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/postgres/Dockerfile b/docker/postgres/Dockerfile
-index b2930db..63cc4c2 100644
---- a/docker/postgres/Dockerfile
-+++ b/docker/postgres/Dockerfile
-@@ -3,7 +3,7 @@ FROM fuel/centos
-
- MAINTAINER Aleksandr Didenko adidenko@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y sudo
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y sudo
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/rabbitmq/Dockerfile b/docker/rabbitmq/Dockerfile
-index 201648f..4f3b67c 100644
---- a/docker/rabbitmq/Dockerfile
-+++ b/docker/rabbitmq/Dockerfile
-@@ -3,7 +3,7 @@
- FROM fuel/centos
- MAINTAINER Aleksandr Didenko adidenko@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y rabbitmq-server
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all; yum --quiet install -y rabbitmq-server
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/rsync/Dockerfile b/docker/rsync/Dockerfile
-index ef737bd..b6eefd1 100644
---- a/docker/rsync/Dockerfile
-+++ b/docker/rsync/Dockerfile
-@@ -5,7 +5,7 @@
- FROM fuel/centos
- MAINTAINER Matthew Mosesohn mmosesohn@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y ruby21-puppet xinetd rsync logrotate
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo;yum clean all;yum --quiet install -y ruby21-puppet xinetd rsync logrotate
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
-diff --git a/docker/rsyslog/Dockerfile b/docker/rsyslog/Dockerfile
-index 5efd623..8721b39 100644
---- a/docker/rsyslog/Dockerfile
-+++ b/docker/rsyslog/Dockerfile
-@@ -2,7 +2,7 @@ FROM fuel/centos
-
- MAINTAINER Aleksandr Didenko adidenko@mirantis.com
-
--RUN rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all;yum --quiet install -y anacron rsyslog
-+RUN sleep 15; rm -rf /etc/yum.repos.d/*; echo -e "[nailgun]\nname=Nailgun Local Repo\nbaseurl=http://$(route -n | awk '/^0.0.0.0/ { print $2 }'):_PORT_/os/x86_64/\ngpgcheck=0" > /etc/yum.repos.d/nailgun.repo; yum clean all;yum --quiet install -y anacron rsyslog
-
- ADD etc /etc
- ADD start.sh /usr/local/bin/start.sh
diff --git a/fuel/build/fuel-main_2.patch b/fuel/build/fuel-main_2.patch
deleted file mode 100644
index 72588cba4..000000000
--- a/fuel/build/fuel-main_2.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-*** fuel-main/sandbox.mk.orig 2015-02-13 12:12:55.362989171 +0100
---- fuel-main/sandbox.mk 2015-02-13 14:50:39.103017653 +0100
-***************
-*** 71,77 ****
---- 71,83 ----
- echo "Updating apt package database"
- sudo chroot $(SANDBOX_UBUNTU) apt-get update
- echo "Installing additional packages: $(SANDBOX_DEB_PKGS)"
-+ test -e $(SANDBOX_UBUNTU)/sbin/start.orig || mv $(SANDBOX_UBUNTU)/sbin/start $(SANDBOX_UBUNTU)/sbin/start.orig
-+ echo "#!/bin/sh" > $(SANDBOX_UBUNTU)/sbin/start
-+ echo "exit 0" >> $(SANDBOX_UBUNTU)/sbin/start
-+ chmod 755 $(SANDBOX_UBUNTU)/sbin/start
- test -n "$(SANDBOX_DEB_PKGS)" && sudo chroot $(SANDBOX_UBUNTU) apt-get install --yes $(SANDBOX_DEB_PKGS)
-+ test -e $(SANDBOX_UBUNTU)/sbin/start.orig && (cp $(SANDBOX_UBUNTU)/sbin/start.orig $(SANDBOX_UBUNTU)/sbin/start; \
-+ rm $(SANDBOX_UBUNTU)/sbin/start.orig)
- echo "SANDBOX_UBUNTU_UP: done"
- endef
-
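The removed hunk worked around daemons being launched by package postinst scripts
inside the build chroot: it shadowed the chroot's /sbin/start with an exit-0 stub for
the duration of apt-get install, then restored the original. Standalone, the trick
looks roughly like this (chroot path and package illustrative):

  CHROOT=/path/to/sandbox
  # Shadow upstart's start(8) with a no-op so postinst scripts cannot start services
  mv $CHROOT/sbin/start $CHROOT/sbin/start.orig
  printf '#!/bin/sh\nexit 0\n' > $CHROOT/sbin/start
  chmod 755 $CHROOT/sbin/start
  sudo chroot $CHROOT apt-get install --yes some-package
  # Put the real start(8) back
  mv $CHROOT/sbin/start.orig $CHROOT/sbin/start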
diff --git a/fuel/build/fuel-main_3.patch b/fuel/build/fuel-main_3.patch
new file mode 100644
index 000000000..8341d72d6
--- /dev/null
+++ b/fuel/build/fuel-main_3.patch
@@ -0,0 +1,19 @@
+*** prepare-build-env.sh.orig Tue Sep 8 08:47:46 2015
+--- prepare-build-env.sh Tue Sep 8 08:48:22 2015
+***************
+*** 41,47 ****
+
+ trusty)
+ GEMPKG="ruby ruby-dev"
+! sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys D5A05778
+ echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
+ sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
+ ;;
+--- 41,47 ----
+
+ trusty)
+ GEMPKG="ruby ruby-dev"
+! sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
+ echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
+ sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
+ ;;
diff --git a/fuel/build/fuel-main_5.patch b/fuel/build/fuel-main_5.patch
new file mode 100644
index 000000000..ec75626d0
--- /dev/null
+++ b/fuel/build/fuel-main_5.patch
@@ -0,0 +1,19 @@
+*** prepare-build-env.sh.orig Tue Sep 8 10:29:08 2015
+--- prepare-build-env.sh Tue Sep 8 10:30:21 2015
+***************
+*** 43,49 ****
+ GEMPKG="ruby ruby-dev"
+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
+ echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
+! sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm
+ ;;
+
+ precise)
+--- 43,49 ----
+ GEMPKG="ruby ruby-dev"
+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 1D2B45A2
+ echo "deb http://mirror.fuel-infra.org/devops/ubuntu/ ./" | sudo tee /etc/apt/sources.list.d/fuel-devops.list
+! sudo apt-get update && sudo apt-get -y install nodejs nodejs-legacy npm dosfstools xorriso
+ ;;
+
+ precise)
diff --git a/fuel/build/install/apt-ftparchive-deb.conf b/fuel/build/install/apt-ftparchive-deb.conf
index 1101ac95b..0d15aec6f 100644
--- a/fuel/build/install/apt-ftparchive-deb.conf
+++ b/fuel/build/install/apt-ftparchive-deb.conf
@@ -17,9 +17,9 @@ TreeDefault {
};
BinDirectory "pool/main" {
- Packages "dists/precise/main/binary-amd64/Packages";
- BinOverride "./indices/override.precise.main";
- ExtraOverride "./indices/override.precise.extra.main";
+ Packages "dists/trusty/main/binary-amd64/Packages";
+ BinOverride "./indices/override.trusty.main";
+ ExtraOverride "./indices/override.trusty.extra.main";
};
Default {
diff --git a/fuel/build/install/apt-ftparchive-release.conf b/fuel/build/install/apt-ftparchive-release.conf
index 02528829d..02706bd7d 100644
--- a/fuel/build/install/apt-ftparchive-release.conf
+++ b/fuel/build/install/apt-ftparchive-release.conf
@@ -10,9 +10,9 @@
APT::FTPArchive::Release::Origin "Ubuntu";
APT::FTPArchive::Release::Label "Ubuntu";
-APT::FTPArchive::Release::Suite "precise";
-APT::FTPArchive::Release::Version "12.04";
-APT::FTPArchive::Release::Codename "precise";
+APT::FTPArchive::Release::Suite "trusty";
+APT::FTPArchive::Release::Version "14.04";
+APT::FTPArchive::Release::Codename "trusty";
APT::FTPArchive::Release::Architectures "amd64";
APT::FTPArchive::Release::Components "main";
-APT::FTPArchive::Release::Description "Ubuntu Precise 12.04 LTS";
+APT::FTPArchive::Release::Description "Ubuntu Trusty Tahr 14.04 LTS";
diff --git a/fuel/build/install/apt-ftparchive-udeb.conf b/fuel/build/install/apt-ftparchive-udeb.conf
index 2acbcf0de..3b5b239a6 100644
--- a/fuel/build/install/apt-ftparchive-udeb.conf
+++ b/fuel/build/install/apt-ftparchive-udeb.conf
@@ -17,8 +17,8 @@ TreeDefault {
};
BinDirectory "pool/debian-installer" {
- Packages "dists/precise/main/debian-installer/binary-amd64/Packages";
- BinOverride "./indices/override.precise.main.debian-installer";
+ Packages "dists/trusty/main/debian-installer/binary-amd64/Packages";
+ BinOverride "./indices/override.trusty.main.debian-installer";
};
Default {
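These configuration files are consumed by apt-ftparchive when the repository indices
are regenerated. The invocations follow the pattern used in install.sh below, run
from the repository root:

  # Rebuild the package indices and the Release file for trusty
  apt-ftparchive -c apt-ftparchive-release.conf generate apt-ftparchive-deb.conf
  apt-ftparchive generate apt-ftparchive-udeb.conf
  apt-ftparchive -c apt-ftparchive-release.conf release dists/trusty/ > dists/trusty/Release
  gzip -9cf dists/trusty/Release > dists/trusty/Release.gz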
diff --git a/fuel/build/install/install.sh b/fuel/build/install/install.sh
index dbb26d6d5..f0bb84914 100755
--- a/fuel/build/install/install.sh
+++ b/fuel/build/install/install.sh
@@ -122,17 +122,17 @@ prep_make_live() {
ssh-copy-id root@$FUELHOST
     sshfs root@${FUELHOST}:/ $TMP_HOSTMOUNT
- if [ -f $REPO/dists/precise/main/binary-amd64/Packages.backup ]; then
+ if [ -f $REPO/dists/trusty/main/binary-amd64/Packages.backup ]; then
echo "Error - found backup file for Packages!"
exit 1
fi
- if [ -f $REPO/dists/precise/main/binary-amd64/Packages.gz.backup ]; then
+ if [ -f $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup ]; then
echo "Error - found backup file for Packages.gz!"
exit 1
fi
- if [ -f $REPO/dists/precise/Release.backup ]; then
+ if [ -f $REPO/dists/trusty/Release.backup ]; then
echo "Error - found backup file for Release!"
exit 1
fi
@@ -142,20 +142,24 @@ prep_make_live() {
exit 1
fi
- cp $REPO/dists/precise/main/binary-amd64/Packages $REPO/dists/precise/main/binary-amd64/Packages.backup
- cp $REPO/dists/precise/main/binary-amd64/Packages.gz $REPO/dists/precise/main/binary-amd64/Packages.gz.backup
- cp $REPO/dists/precise/Release $REPO/dists/precise/Release.backup
+ cp $REPO/dists/trusty/main/binary-amd64/Packages $REPO/dists/trusty/main/binary-amd64/Packages.backup
+ cp $REPO/dists/trusty/main/binary-amd64/Packages.gz $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup
+ cp $REPO/dists/trusty/Release $REPO/dists/trusty/Release.backup
cp -Rvp $DEST/etc/puppet $DEST/etc/puppet.backup
}
post_make_live() {
- echo "Installing into Puppet:"
- cd $TOP/release/puppet/modules
- for dir in *
- do
- echo " $dir"
- cp -Rp $dir $DEST/etc/puppet/modules
- done
+ if [ -d $TOP/release/puppet/modules ]; then
+ echo "Installing into Puppet:"
+ cd $TOP/release/puppet/modules
+ if [ `ls -1 | wc -l` -gt 0 ]; then
+ for dir in *
+ do
+ echo " $dir"
+ cp -Rp $dir $DEST/etc/puppet/modules
+ done
+ fi
+ fi
}
make_live() {
@@ -210,18 +214,21 @@ iso_copy_puppet() {
tar xzf $DEST/puppet-slave.tgz
cd $TOP/release/puppet/modules
- verify_orig_files $TMP_ISOPUPPET/release/puppet $TOP/release/puppet/modules
  # Remove all .orig files before copying as they have now been verified
- find $TOP/release/puppet/modules -type f -name '*.orig' -exec rm {} \;
-
- for dir in $TOP/release/puppet/modules/*
- do
- echo " $dir"
- cp -Rp $dir $TMP_ISOPUPPET/release/puppet
- done
- cd $TMP_ISOPUPPET/release/puppet
+ if [ -d $TOP/release/puppet/modules ]; then
+ if [ `ls -1 | wc -l` -gt 0 ]; then
+ verify_orig_files $TMP_ISOPUPPET/release/puppet $TOP/release/puppet/modules
+ find $TOP/release/puppet/modules -type f -name '*.orig' -exec rm {} \;
+ for dir in $TOP/release/puppet/modules/*
+ do
+ echo " $dir"
+ cp -Rp $dir $TMP_ISOPUPPET/release/puppet
+ done
+ fi
+ fi
+ cd $TMP_ISOPUPPET/release/puppet
tar czf $DEST/puppet-slave.tgz .
cd $TOP
rm -Rf $TMP_ISOPUPPET
@@ -250,7 +257,7 @@ iso_modify_image () {
make_iso() {
prep_make_iso
copy_packages
- iso_copy_puppet
+ #iso_copy_puppet
iso_modify_image
make_iso_image
}
@@ -263,6 +270,8 @@ copy_packages() {
do
echo " $udeb"
cp $udeb $REPO/pool/debian-installer
+ echo "Did not expect a package here, not supported"
+ exit 1
done
cd $TOP/release/packages/ubuntu/pool/main
@@ -270,6 +279,8 @@ copy_packages() {
do
echo " $deb"
cp $deb $REPO/pool/main
+ echo "Did not expect a package here, not supported"
+ exit 1
done
echo "Running Fuel package patch file"
@@ -277,6 +288,8 @@ copy_packages() {
for line in `cat $TOP/apply_patches | grep -v "^#" | grep -v "^$"`; do
echo "Line is $line"
+ echo "Did not expect a line here, not supported"
+ exit 1
ref=`echo $line | cut -d '>' -f 1`
origpkg=`echo $line| cut -d '>' -f 2`
url=`echo $line | cut -d '>' -f 3`
@@ -315,10 +328,11 @@ copy_packages() {
done
printf "Done running Fuel patch file\n\n"
-
echo "Running add packages file"
for line in `cat $TOP/add_opnfv_packages | grep -v "^#" | grep -v "^$"`; do
echo "Line is $line"
+ echo "Did not expect a line here, not supported"
+ exit 1
ref=`echo $line | cut -d '>' -f 1`
origpkg=`echo $line| cut -d '>' -f 2`
url=`echo $line | cut -d '>' -f 3`
@@ -370,6 +384,8 @@ copy_packages() {
printf "\n\n" | tee -a $REPORTFILE
for line in `cat $TOP/patch-packages/release/patch-replacements`
do
+ echo "Did not expect a line here, not supported"
+ exit 1
frompkg=`echo $line | cut -d ">" -f 1`
topkg=`echo $line | cut -d ">" -f 2`
echo "CM: Applying patch to $frompkg" | tee -a $REPORTFILE
@@ -411,17 +427,19 @@ copy_packages() {
APT_DEB_CONF="$TOP/install/apt-ftparchive-deb.conf"
APT_UDEB_CONF="$TOP/install/apt-ftparchive-udeb.conf"
- apt-ftparchive -c "${APT_REL_CONF}" generate "${APT_DEB_CONF}"
- apt-ftparchive generate "${APT_UDEB_CONF}"
+    echo Not running apt-ftparchive -c "${APT_REL_CONF}" generate "${APT_DEB_CONF}"
+    echo Not running apt-ftparchive generate "${APT_UDEB_CONF}"
# Fuel also needs this index file
- cat dists/precise/main/binary-amd64/Packages | \
- awk '/^Package:/{pkg=$2}
- /^Version:/{print pkg ": \"" $2 "\""}' > ubuntu-versions.yaml
- cp ubuntu-versions.yaml $DEST
+ # cat dists/trusty/main/binary-amd64/Packages | \
+ # awk '/^Package:/{pkg=$2}
+ # /^Version:/{print pkg ": \"" $2 "\""}' > ubuntu-versions.yaml
+ # cp ubuntu-versions.yaml $DEST
- apt-ftparchive -c "${APT_REL_CONF}" release dists/precise/ > dists/precise/Release
- gzip -9cf dists/precise/Release > dists/precise/Release.gz
+ # apt-ftparchive -c "${APT_REL_CONF}" release dists/trusty/ > dists/trusty/Release
+ # gzip -9cf dists/trusty/Release > dists/trusty/Release.gz
popd > /dev/null
@@ -444,6 +462,8 @@ if [ $MODE = "iso" ]; then
NEWISO=$3
VOLUMEID="$4 $5"
REPORTFILE="${NEWISO}.txt"
+ echo "Opening reportfile at $REPORTFILE"
+ touch $REPORTFILE
if [ ! -f $ORIGISO ]; then
echo "Can't find original MOS 5.1 iso at $ORIGISO"
rm $CONF
diff --git a/fuel/build/install/uninstall.sh b/fuel/build/install/uninstall.sh
index 36b888441..a9e74bc39 100755
--- a/fuel/build/install/uninstall.sh
+++ b/fuel/build/install/uninstall.sh
@@ -31,17 +31,17 @@ DEST=$MOUNT
REPO=$DEST/var/www/nailgun/ubuntu/fuelweb/x86_64
cd $REPO
-if [ ! -f $REPO/dists/precise/main/binary-amd64/Packages.backup ]; then
+if [ ! -f $REPO/dists/trusty/main/binary-amd64/Packages.backup ]; then
echo "Error - didn't find backup file for Packages!"
exit 1
fi
-if [ ! -f $REPO/dists/precise/main/binary-amd64/Packages.gz.backup ]; then
+if [ ! -f $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup ]; then
echo "Error - didn't find backup file for Packages.gz!"
exit 1
fi
-if [ ! -f $REPO/dists/precise/Release.backup ]; then
+if [ ! -f $REPO/dists/trusty/Release.backup ]; then
echo "Error - didn't find backup file for Release!"
exit 1
fi
@@ -71,9 +71,9 @@ cd $REPO
echo "Restoring backups of datafiles"
-rm -f $REPO/dists/precise/main/binary-amd64/Packages $REPO/dists/precise/main/binary-amd64/Packages.gz
-rm -f $REPO/dists/precise/Release $DEST/etc/puppet/manifests/site.pp
-mv $REPO/dists/precise/main/binary-amd64/Packages.backup $REPO/dists/precise/main/binary-amd64/Packages
-mv $REPO/dists/precise/main/binary-amd64/Packages.gz.backup $REPO/dists/precise/main/binary-amd64/Packages.gz
-mv $REPO/dists/precise/Release.backup $REPO/dists/precise/Release
+rm -f $REPO/dists/trusty/main/binary-amd64/Packages $REPO/dists/trusty/main/binary-amd64/Packages.gz
+rm -f $REPO/dists/trusty/Release $DEST/etc/puppet/manifests/site.pp
+mv $REPO/dists/trusty/main/binary-amd64/Packages.backup $REPO/dists/trusty/main/binary-amd64/Packages
+mv $REPO/dists/trusty/main/binary-amd64/Packages.gz.backup $REPO/dists/trusty/main/binary-amd64/Packages.gz
+mv $REPO/dists/trusty/Release.backup $REPO/dists/trusty/Release
mv $DEST/etc/puppet/manifests/site.pp.backup $DEST/etc/puppet/manifests/site.pp
diff --git a/fuel/build/opendaylight/Makefile b/fuel/build/opendaylight/Makefile
deleted file mode 100644
index bd2eeb5a3..000000000
--- a/fuel/build/opendaylight/Makefile
+++ /dev/null
@@ -1,102 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-############################################################################
-# BEGIN of variables to customize
-#
-SHELL = /bin/bash
-
-
-BUILD_DIR := $(shell pwd)
-GIT_DIR := /tmp
-export CONFIG_SPEC_SCRIPT
-export MAVEN_OPTS = -Xmx1024m -XX:MaxPermSize=512m
-MAINTAINER = "Main Tainer <main.tainer@example.org>"
-ODL_SHORT_NAME = odl
-ODL_VERSION = 0.1-1
-DEPEND = openjdk-8-jdk
-TARGET_BUILD_PATH="/tmp/controller/opendaylight/distribution/opendaylight-karaf/target/"
-MAVEN_SPEC = $(BUILD_DIR)/odl_maven/settings.xml
-
-#
-# END of variables to customize
-#############################################################################
-
-.PHONY: all
-all: odl
-
-############################################################################
-# BEGIN of Include definitions
-#
-include ../config.mk
-#
-# END Include definitions
-#############################################################################
-
-.PHONY: setup
-setup:
- rm -f "$(BUILD_BASE)/f_odl"
- ln -s "$(shell readlink -e $(BUILD_DIR))/f_odl" "$(shell readlink -e $(BUILD_BASE))/f_odl"
-
-.PHONY: validate-cache
-validate-cache:
- @REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG)^{} | awk '{print $$(NF-1)}'); \
- if [ -z $$REMOTE_ID ] || [ $$REMOTE_ID = " " ]; \
- then \
- REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG) | awk '{print $$(NF-1)}'); \
- fi; \
- if [ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep odl | awk '{print $$NF}') ]; \
- then \
- echo "Cache does not match upstream OpenDaylight, cache must be rebuilt!"; \
- exit 1; \
- fi
-
-.PHONY: odl
-odl:
-
-ifeq ($(ODL_MAIN_REPO),)
- @echo "No config-spec target for ODL, nothing to build"
-else
-
-ifeq ($(shell if [ -e .odl-build.log ];then cat .odl-build.log; fi;),$(ODL_MAIN_TAG))
- @cd /tmp && git clone $(ODL_MAIN_REPO) && cd /tmp/controller && git checkout $(ODL_MAIN_TAG)
-
- @echo "ODL is up to date"
-else
- @if [ ! -d "/tmp/controller" ]; then\
- cd /tmp && git clone $(ODL_MAIN_REPO);\
- fi;
-
- @if [ "$(UNIT_TEST)" = "FALSE" ]; then\
- echo "Building ODL without unit test";\
- cd /tmp/controller &&\
- git checkout $(ODL_MAIN_TAG) &&\
- mvn -D maven.test.skip=true -gs $(MAVEN_SPEC) clean install;\
- else\
- echo "Building ODL with unit test";\
- cd /tmp/controller &&\
- git checkout $(ODL_MAIN_TAG) &&\
- mvn -gs $(MAVEN_SPEC) clean install;\
- fi;
-
- @echo "odl" `git -C /tmp/controller show | grep commit | head -1 | cut -d " " -f2` >> $(VERSION_FILE)
- @./make-odl-deb.sh -N $(ODL_SHORT_NAME)_`cd /tmp/controller; git rev-parse --short HEAD` -n $(ODL_SHORT_NAME) -v "$(ODL_VERSION)" -t "$(ODL_MAIN_TAG)" -m $(MAINTAINER) -d $(DEPEND) -p $(TARGET_BUILD_PATH)
- @echo $(ODL_MAIN_TAG) > .odl-build.log
-endif
-endif
-
-.PHONY: clean $(SUBCLEAN)
-clean: $(SUBCLEAN)
- @rm -Rf /tmp/controller
- @rm -f .odl-build.log
- @./make-odl-deb.sh -C
-
-.PHONY: release
-release:
diff --git a/fuel/build/opendaylight/README b/fuel/build/opendaylight/README
deleted file mode 100644
index 7aa392e92..000000000
--- a/fuel/build/opendaylight/README
+++ /dev/null
@@ -1,52 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This directory builds the OpenDaylight debian package by cloning the
-opendaylight.org repo, building the odl tag specified in
-"fuel-build/config-spec" and constructing a debian package source tree under
-"f_odl", which automatically is linked into "fuel_build/." for further build processing.
-
-The opendaylight directory has the following structure:
-.
-+--------+-------------+-----------+
-| | | |
-| Makefile make-odl-deb.sh README
-| (this file)
-|
-+----------+----------+
- | |
- odl_maven/ f_odl/
-
-Makefile:
-Invoked by the git root Makefile, it clones the odl repo, checks out the
-tag/branch indicated in "fuel-build/config-spec", builds odl, and calls
-"make-odl-deb.sh" to create a debian package source tree.
-
-make-odl-deb.sh:
-Creates the odl debian package source tree in "f_odl" from the odl build
-results.
-
-odl_maven/:
-Contains needed control files for maven OpenDaylight build
-
-f_odl/:
-Contains buildscripts and the generated odl debian package source tree produced
-by the odl build (make-odl-deb.sh) which is later used by the root build system.
-
-NOTE on the controller/ git repo clone:
-The git controller repo clone contains all artifacts from the odl build; it
-only exists in /tmp inside the build docker container and is not visible
-anywhere on the build host.
-
-Note on build caching:
-The latest build results are cached and will not be rebuilt unless fuel-build/
-config-spec is changed with respect to the odl version or make clean is applied.
-./.odl-build.log and ./.odl-build.history are used to keep adequate bookkeeping
-to track caching and needed cleanouts.
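The cache check boils down to comparing the commit id the pinned tag resolves to
upstream against the id recorded at the previous build; a minimal standalone sketch
of the same test (repo URL, tag, and version-file name illustrative):

  ODL_MAIN_REPO=https://git.opendaylight.org/gerrit/controller
  ODL_MAIN_TAG=mytag
  # Resolve the tag upstream; ^{} dereferences an annotated tag to its commit
  REMOTE_ID=$(git ls-remote "$ODL_MAIN_REPO" "$ODL_MAIN_TAG^{}" | awk '{print $1}')
  # Compare against the id recorded at the last successful build
  CACHED_ID=$(grep odl .versions | awk '{print $NF}')
  if [ "$REMOTE_ID" != "$CACHED_ID" ]; then
      echo "Cache does not match upstream OpenDaylight, cache must be rebuilt!"
      exit 1
  fi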
diff --git a/fuel/build/opendaylight/f_odl/Makefile b/fuel/build/opendaylight/f_odl/Makefile
deleted file mode 100644
index f7ebd3e73..000000000
--- a/fuel/build/opendaylight/f_odl/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-include $(BUILD_BASE)/config.mk
-ODL_NAME_SHORT := odl
-PACKAGE := odl_$(shell cd /tmp/controller; git rev-parse --short HEAD)
-VERSION := 0.1-1
-DEB_NAME := $(PACKAGE)_$(VERSION)
-
-.PHONY: all
-all: release/pool/main/$(DEB_NAME).deb
-
-release/pool/main/$(DEB_NAME).deb:
-ifeq ($(ODL_MAIN_REPO),)
- @echo "No config-spec target for ODL, nothing to build"
-else
- @mkdir -p tmp/src
- @mkdir -p release/pool/main
- @cp -rp package/$(DEB_NAME) tmp/src
- @gzip -f9 tmp/src/$(DEB_NAME)/usr/share/doc/$(ODL_NAME_SHORT)/changelog.Debian
- @fakeroot dpkg-deb --build tmp/src/$(DEB_NAME)
- @lintian tmp/src/$(DEB_NAME).deb
- @cp tmp/src/$(DEB_NAME).deb release/pool/main
-endif
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
- @rm -f $(DEB_DEST)/$(DEB_NAME).deb
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:release/pool/main/$(DEB_NAME).deb
-ifneq ($(ODL_MAIN_REPO),)
- @cp release/pool/main/$(DEB_NAME).deb $(DEB_DEST)
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
-endif
diff --git a/fuel/build/opendaylight/f_odl/README b/fuel/build/opendaylight/f_odl/README
deleted file mode 100644
index 077962de4..000000000
--- a/fuel/build/opendaylight/f_odl/README
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This directory adds the OpenDaylight (odl) package and related puppet
-deployment manifest such that it gets built into the .iso image and deployed
-on the stack controller cluster.
-
-The f_odl has the following structure:
-.
-+--------+----------+-----------+------------+
- | | | |
- puppet/ Makefile README odl_<change_id>
- | (this file) /<version>
- | |
- | odl deb pkg src
- modules/
- |
- |
- |
- opnfv/
- |
- |
- |
- manifests/
- |
- |
- |
- odl.pp
-
-Makefile:
-Invoked by the git root Makefile, it builds the odl debian package from the
-debian pkg source directory (inside this directory) and pushes it together
-with the manifests to the fuel build source artifact directory, such that it
-eventually gets built into the new fuel .iso.
-
-odl.pp:
-Controls the installation and configuration of odl
-
-odl deb pkg src:
-The debian package source directory tree, including all needed odl artifacts
-and debian package meta data. This debian source package tree is built from
-fuel-build/opendaylight and doesn't exist before it has been built.
diff --git a/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp b/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp
deleted file mode 100644
index 6165646ce..000000000
--- a/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-class opnfv::odl {
- if $::osfamily == 'Debian' {
-
-
- case $::fuel_settings['role'] {
- /controller/: {
- package { 'odl':
- ensure => installed,
- }
- }
- }
- }
-}
diff --git a/fuel/build/opendaylight/f_odl/testing/README b/fuel/build/opendaylight/f_odl/testing/README
deleted file mode 100644
index 2ef497656..000000000
--- a/fuel/build/opendaylight/f_odl/testing/README
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-In order to test the functionality without performing a full deployment, run
-"puppet apply" on fake_init.pp, which will call only the opnfv::odl class.
diff --git a/fuel/build/opendaylight/f_odl/testing/fake_init.pp b/fuel/build/opendaylight/f_odl/testing/fake_init.pp
deleted file mode 100644
index 0600d2e0c..000000000
--- a/fuel/build/opendaylight/f_odl/testing/fake_init.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-include opnfv::odl
diff --git a/fuel/build/opendaylight/make-odl-deb.sh b/fuel/build/opendaylight/make-odl-deb.sh
deleted file mode 100755
index 5222087be..000000000
--- a/fuel/build/opendaylight/make-odl-deb.sh
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/bin/bash
-set -e
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##############################################################################
-# Default variable declarations
-
-COMMAND=
-PACKAGE_NAME=
-PACKAGE_SHORT_NAME=
-PACKAGE_VERSION=
-TARGET_BUILD_PATH=
-DEPENDENCIES=
-MAINTAINER=
-ARCH="amd64"
-BUILD_HISTORY=".odl-build-history"
-
-##############################################################################
-# subroutine: usage
-# Description: Prints out usage of this script
-
-usage ()
-{
-cat <<EOF
-usage: $0 options
-
-$0 creates a ${PACKAGE_NAME} Debian package
-
-OPTIONS:
- -n      Package short name
- -N Package name
- -v Version
- -t Tag
- -p Target build path, the path where the built tar ball is to be fetched
- -m Maintainer
- -d Package dependencies
- -h Prints this message
- -C Clean
-
-E.g.: $0 -n my/deb/src/dest/path -N my-package -v 1.0-1 -t myTag -p path/to/the/source -m "Main Tainer <main.tainer@example.org>" -d myJavaDependence
-EOF
-}
-
-##############################################################################
-# subroutine: clean
-# Description: Cleans up all artifacts from earlier builds
-
-clean ()
-{
-if [ -e $BUILD_HISTORY ]; then
- while read line
- do
- rm -rf $line
- done < $BUILD_HISTORY
- rm ${BUILD_HISTORY}
- exit 0
-fi
-}
-
-##############################################################################
-# make-DEBIAN_control
-# Description: constructs the Debian pack control file
-
-make-DEBIAN_control ()
-{
-cat <<EOF
-Package: $PACKAGE_SHORT_NAME
-Version: $PACKAGE_VERSION
-Section: base
-Priority: optional
-Architecture: $ARCH
-Depends: $DEPENDENCIES
-Maintainer: $MAINTAINER
-Description: OpenDaylight daemon
- This is a daemon for the opendaylight/odl controller service.
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_conffiles
-# Description: Constructs the Debian package config files assignment
-
-make-DEBIAN_conffiles ()
-{
-cat <<EOF
-/etc/odl/etc/all.policy
-/etc/odl/etc/config.properties
-/etc/odl/etc/custom.properties
-/etc/odl/etc/distribution.info
-/etc/odl/etc/equinox-debug.properties
-/etc/odl/etc/java.util.logging.properties
-/etc/odl/etc/jmx.acl.cfg
-/etc/odl/etc/jmx.acl.java.lang.Memory.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.bundle.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.config.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.security.jmx.cfg
-/etc/odl/etc/jmx.acl.osgi.compendium.cm.cfg
-/etc/odl/etc/jre.properties
-/etc/odl/etc/keys.properties
-/etc/odl/etc/org.apache.felix.fileinstall-deploy.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.bundle.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.config.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.feature.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.jaas.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.kar.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.shell.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.system.cfg
-/etc/odl/etc/org.apache.karaf.features.cfg
-/etc/odl/etc/org.apache.karaf.features.obr.cfg
-/etc/odl/etc/org.apache.karaf.features.repos.cfg
-/etc/odl/etc/org.apache.karaf.jaas.cfg
-/etc/odl/etc/org.apache.karaf.kar.cfg
-/etc/odl/etc/org.apache.karaf.log.cfg
-/etc/odl/etc/org.apache.karaf.management.cfg
-/etc/odl/etc/org.apache.karaf.shell.cfg
-/etc/odl/etc/org.ops4j.pax.logging.cfg
-/etc/odl/etc/org.ops4j.pax.url.mvn.cfg
-/etc/odl/etc/regions-config.xml
-/etc/odl/etc/shell.init.script
-/etc/odl/etc/startup.properties
-/etc/odl/etc/system.properties
-/etc/odl/etc/users.properties
-/etc/odl/configuration/context.xml
-/etc/odl/configuration/logback.xml
-/etc/odl/configuration/tomcat-logging.properties
-/etc/odl/configuration/tomcat-server.xml
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_postinst
-# Description: Constructs the Debian package post installation script
-
-make-DEBIAN_postinst ()
-{
-cat <<EOF
-#!/bin/bash -e
-ln -s /etc/${PACKAGE_SHORT_NAME}/* ${TARGET_INSTALL_PATH}
-echo "OpenDaylight $TAG version $PACKAGE_VERSION has been installed"
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_bin
-# Description: Constructs the bin script (normally under /usr/bin)
-
-make-DEBIAN_bin ()
-{
-cat <<EOF
-#!/bin/bash -e
-${TARGET_INSTALL_PATH}bin/karaf $@
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_copyright
-# Description: Constructs the copyright text (normally under /usr/share/doc...)
-
-make-DEBIAN_copyright ()
-{
-cat <<EOF
-OpenDaylight - an open source SDN controller
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_changelog
-# Description: Constructs the changelog text (normally under /usr/share/doc...)
-
-make-DEBIAN_changelog ()
-{
-cat <<EOF
-$PACKAGE_SHORT_NAME ($PACKAGE_VERSION) precise-proposed; urgency=low
-
-  * Derived from $PACKAGE_NAME $PACKAGE_VERSION
-
- -- $MAINTAINER $(date)
-EOF
-}
-
-##############################################################################
-# MAIN
-
-while getopts "N:n:v:d:Chm:t:p:" OPTION
-do
- case $OPTION in
- h)
- usage
- exit 0
- ;;
-
- N)
- PACKAGE_NAME=$OPTARG
- COMMAND+="-N ${PACKAGE_NAME} "
- ;;
-
- n)
- PACKAGE_SHORT_NAME=$OPTARG
- COMMAND+="-n ${PACKAGE_SHORT_NAME} "
- ;;
-
- v)
- PACKAGE_VERSION=$OPTARG
- COMMAND+="-v ${PACKAGE_VERSION} "
- ;;
-
- p)
- TARGET_BUILD_PATH=$OPTARG
- COMMAND+="-p ${TARGET_BUILD_PATH} "
- ;;
-
- t)
- TAG=$OPTARG
- COMMAND+="-t ${TAG} "
- ;;
-
- m)
- MAINTAINER=$OPTARG
- COMMAND+="-m ${MAINTAINER} "
- ;;
-
- d)
- DEPENDENCIES=$OPTARG
- COMMAND+="-d ${DEPENDENCIES} "
- ;;
-
- A)
- ARCH=$OPTARG
- COMMAND+="-A ${ARCH} "
- ;;
-
- C)
- COMMAND+="-C "
- clean
- exit 0
- ;;
- esac
-done
-
-# Constructing script variables
-DEB_PACK_BASE_PATH="f_${PACKAGE_SHORT_NAME}/package/${PACKAGE_NAME}_${PACKAGE_VERSION}"
-echo ${DEB_PACK_BASE_PATH} >> "$BUILD_HISTORY"
-TARGET_INSTALL_PATH="/usr/share/java/${PACKAGE_SHORT_NAME}/"
-DEB_PACK_CONTENT_PATH="${DEB_PACK_BASE_PATH}/usr/share/java/${PACKAGE_SHORT_NAME}/"
-DEB_PACK_CONFIG_PATH="${DEB_PACK_BASE_PATH}/etc/${PACKAGE_SHORT_NAME}"
-TARGET_TAR=$(ls ${TARGET_BUILD_PATH}*.tar.gz)
-TARGET_TAR="${TARGET_TAR##*/}"
-TAR_PATH="${TARGET_TAR%.*}"
-TAR_PATH="${TAR_PATH%.*}"
-if [ -e $DEB_PACK_BASE_PATH ]; then
- rm -R $DEB_PACK_BASE_PATH
-fi
-
-# Create Deb pack content and configuration
-mkdir -p ${DEB_PACK_CONTENT_PATH}
-cp ${TARGET_BUILD_PATH}${TARGET_TAR} ${DEB_PACK_CONTENT_PATH}
-tar -xzf ${DEB_PACK_CONTENT_PATH}${TARGET_TAR} -C ${DEB_PACK_CONTENT_PATH}
-rm ${DEB_PACK_CONTENT_PATH}${TARGET_TAR}
-mv ${DEB_PACK_CONTENT_PATH}${TAR_PATH}/* ${DEB_PACK_CONTENT_PATH}.
-rm -R ${DEB_PACK_CONTENT_PATH}${TAR_PATH}
-
-# Create and populate the Deb pack config target
-mkdir -p ${DEB_PACK_CONFIG_PATH}/etc
-mv ${DEB_PACK_CONTENT_PATH}etc/* ${DEB_PACK_CONFIG_PATH}/etc/
-rm -R ${DEB_PACK_CONTENT_PATH}etc
-mkdir -p ${DEB_PACK_CONFIG_PATH}/configuration
-mv ${DEB_PACK_CONTENT_PATH}configuration/* ${DEB_PACK_CONFIG_PATH}/configuration/
-rm -R ${DEB_PACK_CONTENT_PATH}configuration
-
-# Set package permissions
-find ${DEB_PACK_CONTENT_PATH} -type d -print -exec chmod 755 {} \;
-find ${DEB_PACK_CONFIG_PATH}/etc/ -type f -print -exec chmod 644 {} \;
-find ${DEB_PACK_CONFIG_PATH}/etc/ -type d -print -exec chmod 755 {} \;
-
-# Create package usr/bin odl script
-mkdir "${DEB_PACK_BASE_PATH}/usr/bin"
-chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin"
-make-DEBIAN_bin > "${DEB_PACK_BASE_PATH}/usr/bin/odl"
-chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin/odl"
-
-# Create Deb pack install meta-data
-mkdir "${DEB_PACK_BASE_PATH}/DEBIAN"
-make-DEBIAN_control > "${DEB_PACK_BASE_PATH}/DEBIAN/control"
-make-DEBIAN_conffiles > "${DEB_PACK_BASE_PATH}/DEBIAN/conffiles"
-mkdir -p "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}"
-make-DEBIAN_copyright > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/copyright"
-make-DEBIAN_changelog > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/changelog.Debian"
-
-# Create Deb pack post install symlinks and usr/bin scripts
-make-DEBIAN_postinst > "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
-chmod 755 "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
-mkdir -p "${DEB_PACK_BASE_PATH}/usr/bin"
diff --git a/fuel/build/opendaylight/odl_maven/settings.xml b/fuel/build/opendaylight/odl_maven/settings.xml
deleted file mode 100644
index 35a444264..000000000
--- a/fuel/build/opendaylight/odl_maven/settings.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
-
- <profiles>
- <profile>
- <id>opendaylight-release</id>
- <repositories>
- <repository>
- <releases>
- <enabled>true</enabled>
- <updatePolicy>never</updatePolicy>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <id>opendaylight-mirror</id>
- <name>opendaylight-mirror</name>
- <url>http://nexus.opendaylight.org/content/groups/public/</url>
- </repository>
- </repositories>
- </profile>
-
- <profile>
- <id>opendaylight-snapshots</id>
- <repositories>
- <repository>
- <releases>
- <enabled>false</enabled>
- </releases>
- <snapshots>
- <enabled>true</enabled>
- </snapshots>
- <id>opendaylight-snapshot</id>
- <name>opendaylight-snapshot</name>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
- </repository>
- </repositories>
- </profile>
- </profiles>
-
- <activeProfiles>
- <activeProfile>opendaylight-release</activeProfile>
- <activeProfile>opendaylight-snapshots</activeProfile>
- </activeProfiles>
-</settings>
diff --git a/fuel/build/patch-packages/Makefile b/fuel/build/patch-packages/Makefile
index bd3a43717..339c9e7cf 100644
--- a/fuel/build/patch-packages/Makefile
+++ b/fuel/build/patch-packages/Makefile
@@ -8,7 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-SUBDIRS := debootstrap novnc neutron-common
+SUBDIRS :=
SUBCLEAN = $(addsuffix .clean,$(SUBDIRS))
.PHONY: $(SUBDIRS) $(SUBCLEAN) clean
diff --git a/fuel/build/patch-packages/debootstrap/Makefile b/fuel/build/patch-packages/debootstrap/Makefile
deleted file mode 100644
index 010931225..000000000
--- a/fuel/build/patch-packages/debootstrap/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf udebPackage
- @rm -rf *.udeb
- @rm -rf patch-replacements
- @rm -rf .udebpackage
-
-.PHONY: release
-release:
- ../tools/udeb_unpack debootstrap-udeb_1.0.4*.udeb $(ORIGISO)
- patch -s -p0 < debootstrap.patch
- ../tools/udeb_pack $(REVSTATE)
- @cp *.udeb $(UDEB_DEST)
diff --git a/fuel/build/patch-packages/debootstrap/debootstrap.patch b/fuel/build/patch-packages/debootstrap/debootstrap.patch
deleted file mode 100644
index 62342c96c..000000000
--- a/fuel/build/patch-packages/debootstrap/debootstrap.patch
+++ /dev/null
@@ -1,12 +0,0 @@
---- udebPackage/usr/share/debootstrap/scripts/gutsy.orig 2014-11-10 18:21:37.000000000 +0000
-+++ udebPackage/usr/share/debootstrap/scripts/gutsy 2015-04-15 09:28:44.290437000 +0000
-@@ -112,7 +112,8 @@
-
- p; progress $baseprog $bases INSTCORE "Installing core packages" #2
- ln -sf mawk "$TARGET/usr/bin/awk"
-- x_core_install base-files base-passwd
-+ x_core_install base-passwd
-+ x_core_install base-files
- p; progress $baseprog $bases INSTCORE "Installing core packages" #3
- x_core_install dpkg
-
diff --git a/fuel/build/patch-packages/neutron-common/Makefile b/fuel/build/patch-packages/neutron-common/Makefile
deleted file mode 100644
index e9d43a4c0..000000000
--- a/fuel/build/patch-packages/neutron-common/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf package
- @rm -rf *.deb
- @rm -rf patch-replacements
- @rm -rf .package
-
-.PHONY: release
-release:
- ../tools/deb_unpack neutron-common_*.deb $(ORIGISO)
- patch -s -p0 < quota.patch
- ../tools/deb_pack $(REVSTATE)
- @cp *.deb ../release/packages
- @cat patch-replacements >> ../release/patch-replacements
diff --git a/fuel/build/patch-packages/neutron-common/quota.patch b/fuel/build/patch-packages/neutron-common/quota.patch
deleted file mode 100644
index 6f179f0ab..000000000
--- a/fuel/build/patch-packages/neutron-common/quota.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-*** package/etc/neutron/neutron.conf.orig 2015-05-25 15:50:09.933131041 +0200
---- package/etc/neutron/neutron.conf 2015-05-25 15:55:07.859210010 +0200
-***************
-*** 502,518 ****
- # default_quota = -1
-
- # Number of networks allowed per tenant. A negative value means unlimited.
-! # quota_network = 10
-
- # Number of subnets allowed per tenant. A negative value means unlimited.
-! # quota_subnet = 10
-
- # Number of ports allowed per tenant. A negative value means unlimited.
- # quota_port = 50
-
- # Number of security groups allowed per tenant. A negative value means
- # unlimited.
-! # quota_security_group = 10
-
- # Number of security group rules allowed per tenant. A negative value means
- # unlimited.
---- 502,521 ----
- # default_quota = -1
-
- # Number of networks allowed per tenant. A negative value means unlimited.
-! # This quota modified by OPNFV: 10 -> 50
-! quota_network = 50
-
- # Number of subnets allowed per tenant. A negative value means unlimited.
-! # This quota modified by OPNFV: 10 -> 50
-! quota_subnet = 50
-
- # Number of ports allowed per tenant. A negative value means unlimited.
- # quota_port = 50
-
- # Number of security groups allowed per tenant. A negative value means
- # unlimited.
-! # This quota modified by OPNFV: 10 -> 50
-! quota_security_group = 50
-
- # Number of security group rules allowed per tenant. A negative value means
- # unlimited.
-***************
-*** 538,547 ****
- # quota_health_monitor = -1
-
- # Number of routers allowed per tenant. A negative value means unlimited.
-! # quota_router = 10
-
- # Number of floating IPs allowed per tenant. A negative value means unlimited.
-! # quota_floatingip = 50
-
- # Number of firewalls allowed per tenant. A negative value means unlimited.
- # quota_firewall = 1
---- 541,552 ----
- # quota_health_monitor = -1
-
- # Number of routers allowed per tenant. A negative value means unlimited.
-! # This quota modified by OPNFV: 10 -> 50
-! quota_router = 50
-
- # Number of floating IPs allowed per tenant. A negative value means unlimited.
-! # This quota modified by OPNFV: 50 -> 100
-! quota_floatingip = 100
-
- # Number of firewalls allowed per tenant. A negative value means unlimited.
- # quota_firewall = 1
diff --git a/fuel/build/patch-packages/novnc/Makefile b/fuel/build/patch-packages/novnc/Makefile
deleted file mode 100644
index 16c0196e9..000000000
--- a/fuel/build/patch-packages/novnc/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-# This is a temporary patch which add missing files
-# inside novnc ubuntu package.
-# Related bug: https://bugs.launchpad.net/fuel/+bug/1433894
-TOP := $(shell pwd)
-
-.PHONY: all
-all:
-
-.PHONY: clean
-clean:
- @rm -rf package
- @rm -rf *.deb
- @rm -rf patch-replacements
- @rm -rf .package
-
-.PHONY: release
-release:
- ../tools/deb_unpack novnc_0.5.1*.deb $(ORIGISO)
- ./fix-missing.sh
- ../tools/deb_pack $(REVSTATE)
- @cp *.deb ../release/packages
- @cat patch-replacements >> ../release/patch-replacements
diff --git a/fuel/build/patch-packages/novnc/fix-missing.sh b/fuel/build/patch-packages/novnc/fix-missing.sh
deleted file mode 100755
index 61ef1db14..000000000
--- a/fuel/build/patch-packages/novnc/fix-missing.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-MISSING_FILES="keyboard.js keysymdef.js keysym.js"
-NOVNC_SOURCE="http://raw.githubusercontent.com/kanaka/noVNC/v0.5.1/include"
-
-for file in $MISSING_FILES
-do
- wget -P package/usr/share/novnc/include/ "$NOVNC_SOURCE/$file"
-done
diff --git a/fuel/ci/README b/fuel/ci/README
index 3525d4da9..aab823bc0 100644
--- a/fuel/ci/README
+++ b/fuel/ci/README
@@ -16,8 +16,91 @@ There are two Fuel@OPNF autonomous scripts fo this, complying to the OPNFV CI pi
For more info on usage:
./build.sh -h
-./deploy.sh -h
+sudo ./deploy.sh -h
+python deploy.py -h
-To be able to deploy on a certain metal environment there needs to be a Deplyment Environment Adaptor" executable with propper added to $PATH such that
-deploy.sh can call it by $dea [options] as indicated by ./deploy -h.
+usage: python deploy.py [-h] [-nf] [-nh] [-fo] [-co] [-c] [-iso [ISO_FILE]]
+ [-dea [DEA_FILE]] [-dha [DHA_FILE]] [-s STORAGE_DIR]
+ [-b PXE_BRIDGE] [-p FUEL_PLUGINS_DIR]
+optional arguments:
+ -h, --help show this help message and exit
+ -nf Do not install Fuel Master (and Node VMs when using
+ libvirt)
+ -nh Don't run health check after deployment
+ -fo Install Fuel Master only (and Node VMs when using
+ libvirt)
+ -co Cleanup VMs and Virtual Networks according to what is
+ defined in DHA
+ -c Cleanup after deploy
+ -iso [ISO_FILE] ISO File [default: OPNFV.iso]
+ -dea [DEA_FILE] Deployment Environment Adapter: dea.yaml
+ -dha [DHA_FILE] Deployment Hardware Adapter: dha.yaml
+ -s STORAGE_DIR Storage Directory [default: images]
+ -b PXE_BRIDGE Linux Bridge for booting up the Fuel Master VM
+ [default: pxebr]
+ -p FUEL_PLUGINS_DIR Fuel Plugins directory
+
+
+
+* EXAMPLES:
+
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware Environment:
+
+ sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+
+
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment:
+
+ sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+
+
+- Deploy OPNFV Cloud on an already active Environment where the Fuel Master VM is running, so there is no need to install Fuel again:
+
+ sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+
+ => with plugin installation
+ sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+
+ => with cleanup after deployment is finished
+ sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c
+
+ => no healthcheck after deployment is completed
+ sudo ./deploy.sh -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh
+
+
+- Install Fuel Master only (and Node VMs when using virtual environment):
+
+ => for virtual environment:
+ sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+
+ => for hardware environment:
+ sudo ./deploy.sh -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+
+
+- Cleanup a running OPNFV environment:
+
+ sudo ./deploy.sh -co -dha ~/CONF/virtual/dha.yaml
+
+
+* WARNINGS:
+
+=> If optional argument -s <storage_dir> is not specified, Autodeployment will use
+"<current_working_dir>/images" as the default, and it will create it if it does not already exist
+
+=> If optional argument -b <pxe_bridge> is not specified, Autodeployment will use "pxebr" as the default;
+if the bridge does not exist, the application will terminate with an error message
+
+=> If argument -iso [ISO_FILE] is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
+as the default; if the ISO file does not exist, the application will terminate with an error message
+
+=> If argument -dea [DEA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dea.yaml"
+as the default; if the DEA file does not exist, the application will terminate with an error message
+
+=> If argument -dha [DHA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dha.yaml"
+as the default; if the DHA file does not exist, the application will terminate with an error message
+
+=> Optional argument -b PXE_BRIDGE is not required for Autodeployment in a virtual environment;
+ even if it is specified, it will not be used because the virtual environment uses a different virtual network setup
+
+=> If optional argument -p FUEL_PLUGINS_DIR is not specified, no external plugins will be installed in Fuel \ No newline at end of file
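
Taken together, the defaults listed in the warnings above mean that a bare
invocation such as

    sudo ./deploy.sh

behaves as if the following had been given (a sketch, assuming all the files
exist in the current working directory):

    sudo ./deploy.sh -iso ./OPNFV.iso -dea ./dea.yaml -dha ./dha.yaml -s ./images -b pxebr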
diff --git a/fuel/ci/build.sh b/fuel/ci/build.sh
index 51ccdae5b..f8e164a76 100755
--- a/fuel/ci/build.sh
+++ b/fuel/ci/build.sh
@@ -80,6 +80,32 @@ EOF
############################################################################
############################################################################
+# Begin of string xor function
+#
+function xor()
+{
+ local res=(`echo "$1" | sed "s/../0x& /g"`)
+ shift 1
+ while [[ "$1" ]]; do
+ local one=(`echo "$1" | sed "s/../0x& /g"`)
+ local count1=${#res[@]}
+ if [ $count1 -lt ${#one[@]} ]
+ then
+ count1=${#one[@]}
+ fi
+ for (( i = 0; i < $count1; i++ ))
+ do
+ res[$i]=$((${one[$i]:-0} ^ ${res[$i]:-0}))
+ done
+ shift 1
+ done
+ printf "%02x" "${res[@]}"
+}
+#
+# END of string xor function
+############################################################################
+
+############################################################################
# BEGIN of variables to customize
#
BUILD_BASE=$(readlink -e ../build/)
@@ -87,7 +113,7 @@ RESULT_DIR="${BUILD_BASE}/release"
BUILD_SPEC="${BUILD_BASE}/config.mk"
CACHE_DIR="cache"
LOCAL_CACHE_ARCH_NAME="fuel-cache"
-REMOTE_CACHE_ARCH_NAME="fuel_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")"
+
REMOTE_ACCESS_METHD=curl
INCLUDE_DIR=../include
#
@@ -117,6 +143,14 @@ BUILD_DIR=
BUILD_LOG=
BUILD_VERSION=
MAKE_ARGS=
+FUEL_GIT_SRC="$(make -f ../build/config.mk get-fuel-repo | cut -d " " -f1)"
+FUEL_GIT_BRANCH="$(make -f ../build/config.mk get-fuel-repo | cut -d " " -f2)"
+CACHE_MD5=$(md5sum ../build/cache.mk | cut -f1 -d " ")
+CONFIG_MD5=$(md5sum ../build/config.mk | cut -f1 -d " ")
+FUEL_COMMIT_ID=$(git ls-remote $FUEL_GIT_SRC -t $FUEL_GIT_BRANCH | cut -d $'\t' -f1)
+REMOTE_CACHE_ARCH_HASH_TMP="$(xor $CACHE_MD5 $CONFIG_MD5)"
+REMOTE_CACHE_ARCH_HASH="$(xor $REMOTE_CACHE_ARCH_HASH_TMP $FUEL_COMMIT_ID)"
+REMOTE_CACHE_ARCH_NAME="fuel_cache-$REMOTE_CACHE_ARCH_HASH"
#
# END of script assigned variables
############################################################################
@@ -135,53 +169,53 @@ source ${INCLUDE_DIR}/build.sh.debug
while getopts "s:c:v:f:l:r:RtTh" OPTION
do
case $OPTION in
- h)
- usage
- rc=0
- exit $rc
- ;;
-
- s)
- BUILD_SPEC=${OPTARG}
- ;;
-
- c)
- BUILD_CACHE_URI=${OPTARG}
- ;;
-
- l)
- BUILD_LOG=${OPTARG}
- ;;
-
- v)
- BUILD_VERSION=${OPTARG}
- ;;
-
- f)
- BUILD_FLAGS=${OPTARG}
- ;;
-
- r) REMOTE_ACCESS_METHD=${OPTARG}
- ;;
-
- R)
- RECURSIVE=1
- ;;
-
- t)
- INTEGRATION_TEST=1
- ;;
-
- T)
- INTEGRATION_TEST=1
- FULL_INTEGRATION_TEST=1
- ;;
-
- *)
- echo "${OPTION} is not a valid argument"
- rc=100
- exit $rc
- ;;
+ h)
+ usage
+ rc=0
+ exit $rc
+ ;;
+
+ s)
+ BUILD_SPEC=${OPTARG}
+ ;;
+
+ c)
+ BUILD_CACHE_URI=${OPTARG}
+ ;;
+
+ l)
+ BUILD_LOG=${OPTARG}
+ ;;
+
+ v)
+ BUILD_VERSION=${OPTARG}
+ ;;
+
+ f)
+ BUILD_FLAGS=${OPTARG}
+ ;;
+
+ r) REMOTE_ACCESS_METHD=${OPTARG}
+ ;;
+
+ R)
+ RECURSIVE=1
+ ;;
+
+ t)
+ INTEGRATION_TEST=1
+ ;;
+
+ T)
+ INTEGRATION_TEST=1
+ FULL_INTEGRATION_TEST=1
+ ;;
+
+ *)
+ echo "${OPTION} is not a valid argument"
+ rc=100
+ exit $rc
+ ;;
esac
done
@@ -191,44 +225,44 @@ fi
for ((i=0; i<${#BUILD_FLAGS};i++)); do
case ${BUILD_FLAGS:$i:1} in
- s)
- rc=0
- exit $rc
- ;;
-
- f)
- rc=1
- exit $rc
- ;;
-
- t)
- UNIT_TEST=1
- ;;
-
- i)
- INTERACTIVE=1
- ;;
-
- P)
- POPULATE_CACHE=1
- ;;
-
- d)
- DETACH=1
- echo "Detach is not yet supported - exiting ...."
- rc=100
- exit $rc
- ;;
-
- D)
- DEBUG=1
- ;;
-
- *)
- echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
- rc=100
- exit $rc
- ;;
+ s)
+ rc=0
+ exit $rc
+ ;;
+
+ f)
+ rc=1
+ exit $rc
+ ;;
+
+ t)
+ UNIT_TEST=1
+ ;;
+
+ i)
+ INTERACTIVE=1
+ ;;
+
+ P)
+ POPULATE_CACHE=1
+ ;;
+
+ d)
+ DETACH=1
+ echo "Detach is not yet supported - exiting ...."
+ rc=100
+ exit $rc
+ ;;
+
+ D)
+ DEBUG=1
+ ;;
+
+ *)
+ echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...."
+ rc=100
+ exit $rc
+ ;;
esac
done
@@ -252,13 +286,13 @@ fi
if [ ! -z ${BUILD_LOG} ]; then
if [[ ${RECURSIVE} -ne 1 ]]; then
- set +e
- eval $0 -R $@ > ${BUILD_LOG} 2>&1
- rc=$?
- set -e
- if [ $rc -ne 0]; then
- exit $rc
- fi
+ set +e
+ eval $0 -R $@ > ${BUILD_LOG} 2>&1
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ exit $rc
+ fi
fi
fi
@@ -284,47 +318,86 @@ echo $$ > ${LOCK_FILE}
if [ ! -z ${BUILD_CACHE_URI} ]; then
if [ ${POPULATE_CACHE} -ne 1 ]; then
- rm -rf ${CACHE_TMP}/cache
- mkdir -p ${CACHE_TMP}/cache
- echo "Downloading cach file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
- set +e
- ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
- rc=$?
- set -e
- if [ $rc -ne 0 ]; then
- echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
- POPULATE_CACHE=1
- else
- echo "Unpacking cache file ..."
- tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
- cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
- set +e
- make -C ${BUILD_BASE} validate-cache;
- rc=$?
- set -e
-
- if [ $rc -ne 0 ]; then
- echo "Cache invalid - a new cache will be built "
- POPULATE_CACHE=1
- else
- cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
- fi
- rm -rf ${CACHE_TMP}/cache
- fi
+ rm -rf ${CACHE_TMP}/cache
+ mkdir -p ${CACHE_TMP}/cache
+ echo "Downloading cache archive ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..."
+ set +e
+ ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ echo "Remote cache does not exist, or is not accessible - a new cache will be built ..."
+ POPULATE_CACHE=1
+ else
+ echo "Unpacking cache archive ..."
+ set +e
+ tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ echo "WARNING: The cache seems to be corrupt or has trailing garbage, will try to use brute force"
+ echo "Info about the cache below:"
+ set +e
+ file ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+ tar -C ${CACHE_TMP}/cache -tvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz
+ set -e
+ echo "Current time is: `date`"
+ set +e
+ pushd ${CACHE_TMP}/cache
+ gunzip -dcq ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz | tar -xvf -
+ rc=$?
+ set -e
+ popd
+ if [ $rc -ne 0 ]; then
+ echo "ERROR: Not able to resolve the cache corruption"
+ POPULATE_CACHE=1
+ else
+ echo "The chache corruption was resolved"
+ cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
+ set +e
+ make -C ${BUILD_BASE} validate-cache;
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ echo "Cache invalid - a new cache will be built "
+ POPULATE_CACHE=1
+ else
+ echo "Cache is up to date and will be used"
+ cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
+ fi
+ fi
+ else
+ echo "Cache archive is intact"
+ cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/.
+ set +e
+ make -C ${BUILD_BASE} validate-cache;
+ rc=$?
+ set -e
+
+ if [ $rc -ne 0 ]; then
+ echo "Cache invalid - a new cache will be built "
+ POPULATE_CACHE=1
+ else
+ echo "Cache is up to date and will be used"
+ cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE}
+ fi
+ fi
+ rm -rf ${CACHE_TMP}/cache
+ fi
fi
fi
if [ ${POPULATE_CACHE} -eq 1 ]; then
if [ ${DEBUG} -eq 0 ]; then
- set +e
- cd ${BUILD_BASE} && make clean
- rc=$?
- set -e
- if [ $rc -ne 0 ]; then
- echo "Build - make clean failed, exiting ..."
- rc=100
- exit $rc
- fi
+ set +e
+ cd ${BUILD_BASE} && make clean
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ echo "Build - make clean failed, exiting ..."
+ rc=100
+ exit $rc
+ fi
fi
fi
@@ -352,12 +425,12 @@ if [ ${DEBUG} -eq 0 ]; then
rc=$?
set -e
if [ $rc -gt 0 ]; then
- echo "Build: make all failed, exiting ..."
- rc=200
- exit $rc
+ echo "Build: make all failed, exiting ..."
+ rc=200
+ exit $rc
fi
else
-debug_make
+ debug_make
fi
set +e
make -C ${BUILD_BASE} prepare-cache
@@ -377,11 +450,20 @@ cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
if [ $POPULATE_CACHE -eq 1 ]; then
if [ ! -z ${BUILD_CACHE_URI} ]; then
- echo "Building cache ..."
- tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
- echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
- ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
- rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+ echo "Building cache ..."
+ tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+ set +e
+ tar -C ${CACHE_TMP}/cache -tvf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+ rc=$?
+ set -e
+ if [ $rc -ne 0 ]; then
+ echo "WARNING the cache archive generated seems to be corrupt, or containing trailing garbage"
+ else
+ echo "The Cache archive build is intact"
+ fi
+ echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+ ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+ rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
fi
fi
echo "Success!!!"
diff --git a/fuel/ci/deploy.sh b/fuel/ci/deploy.sh
index df232497b..d5b70d0d6 100755
--- a/fuel/ci/deploy.sh
+++ b/fuel/ci/deploy.sh
@@ -1,12 +1,8 @@
-#!/bin/bash -x
-set -o xtrace
+#!/bin/bash
set -o errexit
-set -o nounset
-set -o pipefail
-
-WORKSPACE=$(readlink -e ..)
-ISO_LOCATION="$(readlink -f $(find $WORKSPACE -iname 'fuel*iso' -type f))"
-INTERFACE="fuel"
-
-cd "${WORKSPACE}/deploy"
-./deploy_fuel.sh "$ISO_LOCATION" $INTERFACE 2>&1 | tee deploy_fuel.log
+topdir=$(dirname $(readlink -f $BASH_SOURCE))
+deploydir=$(cd ${topdir}/../deploy; pwd)
+pushd ${deploydir} > /dev/null
+echo -e "python deploy.py $@\n"
+python deploy.py $@
+popd > /dev/null \ No newline at end of file
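
With this change deploy.sh becomes a thin wrapper: any arguments are forwarded
verbatim to deploy.py, so for example

    sudo ./deploy.sh -co -dha ~/CONF/virtual/dha.yaml

is equivalent to running python deploy.py -co -dha ~/CONF/virtual/dha.yaml
from the fuel/deploy directory.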
diff --git a/fuel/deploy/README b/fuel/deploy/README
new file mode 100644
index 000000000..167078bf8
--- /dev/null
+++ b/fuel/deploy/README
@@ -0,0 +1,186 @@
+
+======== PREREQUISITES ========
+
+The following dependencies and Python modules are required to be installed:
+
+- for Ubuntu:
+
+sudo apt-get install -y libvirt-bin qemu-kvm python-pip fuseiso mkisofs
+sudo apt-get install -y python-dev libz-dev libxml2-dev libxslt-dev
+sudo pip install pyyaml netaddr paramiko lxml scp pycrypto ecdsa
+
+During the libvirt installation the user is added to the libvirtd group, so you have to
+log out and then log back in again
+
+
+======== PREPARE and RUN the OPNFV Autodeployment ========
+
+
+--- Step.1 Prepare the DEA and DHA configuration files and the OPNFV ISO file
+
+Make sure that you are using the right DEA (Deployment Environment Adapter) and
+DHA (Deployment Hardware Adapter) configuration files. The ones provided are only
+templates; you will have to modify them according to your needs
+
+- If you wish to deploy the OPNFV cloud environment on top of KVM/Libvirt
+  virtualization, use the following configuration files as examples:
+
+ * SR1 configuration files
+
+ => templates/virtual_environment/conf/ha
+ dea.yaml
+ dha.yaml
+
+
+ * ARNO configuration files
+
+ => templates/virtual_environment/old_conf/ha
+ dea.yaml
+ dha.yaml
+
+ => templates/virtual_environment/old_conf/multinode
+ dea.yaml
+ dha.yaml
+
+
+- If you wish to deploy the OPNFV cloud environment on hardware,
+  use the following configuration files as examples:
+
+ * SR1 configuration files
+
+ => templates/hardware_environment/conf/ericsson_montreal_lab/ha
+ dea.yaml
+ dha.yaml
+
+ => templates/hardware_environment/conf/linux_foundation_lab/pod1/ha
+ dea.yaml
+ dha.yaml
+
+ => templates/hardware_environment/conf/linux_foundation_lab/pod2/ha
+ dea.yaml
+ dha.yaml
+
+
+ * ARNO configuration files
+
+ => templates/hardware_environment/old_conf/ericsson_montreal_lab/ha
+ dea.yaml
+ dha.yaml
+
+ => templates/hardware_environment/old_conf/ericsson_montreal_lab/multinode
+ dea.yaml
+ dha.yaml
+
+ => templates/hardware_environment/old_conf/linux_foundation_lab/ha
+ dea.yaml
+ dha.yaml
+
+ => templates/hardware_environment/old_conf/linux_foundation_lab/multinode
+ dea.yaml
+ dha.yaml
+
+
+--- Step.2 Run Autodeployment ---
+
+usage: python deploy.py [-h] [-nf] [-nh] [-fo] [-co] [-c] [-iso [ISO_FILE]]
+ [-dea [DEA_FILE]] [-dha [DHA_FILE]] [-s STORAGE_DIR]
+ [-b PXE_BRIDGE] [-p FUEL_PLUGINS_DIR]
+
+optional arguments:
+ -h, --help show this help message and exit
+ -nf Do not install Fuel Master (and Node VMs when using libvirt)
+ -nh Don't run health check after deployment
+ -fo Install Fuel Master only (and Node VMs when using libvirt)
+ -co Cleanup VMs and Virtual Networks according to what is
+ defined in DHA
+ -c Cleanup after deploy
+ -iso [ISO_FILE] ISO File [default: OPNFV.iso]
+ -dea [DEA_FILE] Deployment Environment Adapter: dea.yaml
+ -dha [DHA_FILE] Deployment Hardware Adapter: dha.yaml
+ -s STORAGE_DIR Storage Directory [default: images]
+ -b PXE_BRIDGE Linux Bridge for booting up the Fuel Master VM
+ [default: pxebr]
+ -p FUEL_PLUGINS_DIR Fuel Plugins directory
+
+
+* EXAMPLES:
+
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware Environment:
+
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+
+
+- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment:
+
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+
+
+- Deploy OPNFV Cloud on an already active Environment where the Fuel Master VM is running, so there is no need to install Fuel again:
+
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+
+ => with plugin installation
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml
+
+ => with cleanup after deployment is finished
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -c
+
+ => no healthcheck after deployment is completed
+ sudo python deploy.py -nf -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -nh
+
+
+- Install Fuel Master only (and Node VMs when using virtual environment):
+
+ => for virtual environment:
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images
+
+ => for hardware environment:
+ sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr
+
+
+- Cleanup a running OPNFV environment:
+
+ sudo python deploy.py -co -dha ~/CONF/virtual/dha.yaml
+
+
+* WARNINGS:
+
+=> If optional argument -s <storage_dir> is not specified, Autodeployment will use
+"<current_working_dir>/images" as the default, and it will create it if it does not already exist
+
+=> If optional argument -b <pxe_bridge> is not specified, Autodeployment will use "pxebr" as the default;
+if the bridge does not exist, the application will terminate with an error message
+
+=> If argument -iso [ISO_FILE] is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso"
+as the default; if the ISO file does not exist, the application will terminate with an error message
+
+=> If argument -dea [DEA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dea.yaml"
+as the default; if the DEA file does not exist, the application will terminate with an error message
+
+=> If argument -dha [DHA_FILE] is not specified, Autodeployment will use "<current_working_dir>/dha.yaml"
+as the default; if the DHA file does not exist, the application will terminate with an error message
+
+=> Optional argument -b PXE_BRIDGE is not required for Autodeployment in a virtual environment;
+ even if it is specified, it will not be used because the virtual environment uses a different virtual network setup
+
+=> If optional argument -p FUEL_PLUGINS_DIR is not specified, no external plugins will be installed in Fuel
+
+
+--- Networking considerations ---
+
+For Virtual Environment:
+
+There are NAT and iptables conflicts where libvirt bridging meets the Fuel Master;
+according to http://wiki.libvirt.org/page/Networking,
+netfilter on the bridges should be disabled
+
+Append these lines to /etc/sysctl.conf:
+
+cat >> /etc/sysctl.conf <<EOF
+net.bridge.bridge-nf-call-ip6tables = 0
+net.bridge.bridge-nf-call-iptables = 0
+net.bridge.bridge-nf-call-arptables = 0
+EOF
+
+and then reload the configuration:
+sysctl -p /etc/sysctl.conf
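
After reloading, the bridge netfilter settings can be verified with sysctl
(a quick check; all three values should read 0):

    sysctl net.bridge.bridge-nf-call-iptables
    net.bridge.bridge-nf-call-iptables = 0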
diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt
deleted file mode 100644
index d392f8f65..000000000
--- a/fuel/deploy/README.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-======== How to prepare and run the OPNFV Autodeployment =======
-
-in fuel/build/deploy run these:
-
-
-
---- Step.1 Install prerequisites
-
-sudo ./install-ubuntu-packages.sh
-
-
-
-
-
-
---- Step.2-A If wou want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization
- run the following environment setup script
-
-sudo python setup_environment.py <storage_directory> <path_to_dha_file>
-
-Example:
- sudo python setup_environment.py /mnt/images dha.yaml
-
-
-
-
-
-
---- Step.2-B If you want to deploy OPNFV cloud environment on baremetal run the
- following environment setup script
-
-sudo python setup_vfuel.py <storage_directory> <path_to_dha_file>
-
-Example:
- sudo python setup_vfuel.py /mnt/images dha.yaml
-
-
-WARNING!:
-setup_vfuel.py adds the following snippet into /etc/network/interfaces
-making sure to replace in setup_vfuel.py interfafe 'p1p1.20' with your actual outbound
-interface in order to provide network access to the Fuel master for DNS and NTP.
-
-iface vfuelnet inet static
- bridge_ports em1
- address 10.40.0.1
- netmask 255.255.255.0
- pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
- post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-
-
-
-
-
-
---- Step.3 Start Autodeployment
-Make sure you use the right Deployment Environment Adapter and
-Deployment Hardware Adaper configuration files:
-
- - for baremetal: baremetal/dea.yaml baremetal/dha.yaml
-
- - for libvirt: libvirt/dea.yaml libvirt/dha.yaml
-
-
-sudo python deploy.py [-nf] <isofile> <deafile> <dhafile>
-
-Example:
- sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml
-
diff --git a/fuel/deploy/__init__.py b/fuel/deploy/__init__.py
index e69de29bb..fb73157f9 100644
--- a/fuel/deploy/__init__.py
+++ b/fuel/deploy/__init__.py
@@ -0,0 +1,8 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
diff --git a/fuel/deploy/baremetal/dea.yaml b/fuel/deploy/baremetal/dea.yaml
deleted file mode 100644
index eb3019cab..000000000
--- a/fuel/deploy/baremetal/dea.yaml
+++ /dev/null
@@ -1,982 +0,0 @@
-title: Deployment Environment Adapter (DEA)
-# DEA API version supported
-version: 1.1
-created: Tue May 5 15:33:07 UTC 2015
-comment: Test environment Ericsson Montreal
-environment_name: opnfv
-environment_mode: multinode
-wanted_release: Juno on Ubuntu 12.04.4
-nodes:
-- id: 1
- interfaces: interface1
- transformations: controller1
- role: controller
-- id: 2
- interfaces: interface1
- transformations: compute1
- role: compute
-fuel:
- ADMIN_NETWORK:
- ipaddress: 10.40.0.2
- netmask: 255.255.255.0
- dhcp_pool_start: 10.40.0.3
- dhcp_pool_end: 10.40.0.254
- DNS_UPSTREAM: 10.118.32.193
- DNS_DOMAIN: opnfvericsson.ca
- DNS_SEARCH: opnfvericsson.ca
- FUEL_ACCESS:
- user: admin
- password: admin
- HOSTNAME: opnfv
- NTP1: 0.ca.pool.ntp.org
- NTP2: 1.ca.pool.ntp.org
- NTP3: 2.ca.pool.ntp.org
-interfaces:
- interface1:
- eth0:
- - fuelweb_admin
- eth2:
- - public
- - management
- - storage
- - private
-transformations:
- controller1:
- - action: add-br
- name: br-eth0
- - action: add-port
- bridge: br-eth0
- name: eth0
- - action: add-br
- name: br-eth1
- - action: add-port
- bridge: br-eth1
- name: eth1
- - action: add-br
- name: br-eth2
- - action: add-port
- bridge: br-eth2
- name: eth2
- - action: add-br
- name: br-eth3
- - action: add-port
- bridge: br-eth3
- name: eth3
- - action: add-br
- name: br-eth4
- - action: add-port
- bridge: br-eth4
- name: eth4
- - action: add-br
- name: br-eth5
- - action: add-port
- bridge: br-eth5
- name: eth5
- - action: add-br
- name: br-ex
- - action: add-br
- name: br-mgmt
- - action: add-br
- name: br-storage
- - action: add-br
- name: br-fw-admin
- - action: add-patch
- bridges:
- - br-eth2
- - br-storage
- tags:
- - 220
- - 0
- vlan_ids:
- - 220
- - 0
- - action: add-patch
- bridges:
- - br-eth2
- - br-mgmt
- tags:
- - 320
- - 0
- vlan_ids:
- - 320
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-fw-admin
- trunks:
- - 0
- - action: add-patch
- bridges:
- - br-eth2
- - br-ex
- tags:
- - 120
- - 0
- vlan_ids:
- - 120
- - 0
- - action: add-br
- name: br-prv
- - action: add-patch
- bridges:
- - br-eth2
- - br-prv
- compute1:
- - action: add-br
- name: br-eth0
- - action: add-port
- bridge: br-eth0
- name: eth0
- - action: add-br
- name: br-eth1
- - action: add-port
- bridge: br-eth1
- name: eth1
- - action: add-br
- name: br-eth2
- - action: add-port
- bridge: br-eth2
- name: eth2
- - action: add-br
- name: br-eth3
- - action: add-port
- bridge: br-eth3
- name: eth3
- - action: add-br
- name: br-eth4
- - action: add-port
- bridge: br-eth4
- name: eth4
- - action: add-br
- name: br-eth5
- - action: add-port
- bridge: br-eth5
- name: eth5
- - action: add-br
- name: br-mgmt
- - action: add-br
- name: br-storage
- - action: add-br
- name: br-fw-admin
- - action: add-patch
- bridges:
- - br-eth2
- - br-storage
- tags:
- - 220
- - 0
- vlan_ids:
- - 220
- - 0
- - action: add-patch
- bridges:
- - br-eth2
- - br-mgmt
- tags:
- - 320
- - 0
- vlan_ids:
- - 320
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-fw-admin
- trunks:
- - 0
- - action: add-br
- name: br-prv
- - action: add-patch
- bridges:
- - br-eth2
- - br-prv
-opnfv:
- compute: {}
- controller: {}
-network:
- networking_parameters:
- base_mac: fa:16:3e:00:00:00
- dns_nameservers:
- - 10.118.32.193
- - 8.8.8.8
- floating_ranges:
- - - 172.16.0.130
- - 172.16.0.254
- gre_id_range:
- - 2
- - 65535
- internal_cidr: 192.168.111.0/24
- internal_gateway: 192.168.111.1
- net_l23_provider: ovs
- segmentation_type: vlan
- vlan_range:
- - 2022
- - 2023
- networks:
- - cidr: 172.16.0.0/24
- gateway: 172.16.0.1
- ip_ranges:
- - - 172.16.0.2
- - 172.16.0.126
- meta:
- assign_vip: true
- cidr: 172.16.0.0/24
- configurable: true
- floating_range_var: floating_ranges
- ip_range:
- - 172.16.0.2
- - 172.16.0.126
- map_priority: 1
- name: public
- notation: ip_ranges
- render_addr_mask: public
- render_type: null
- use_gateway: true
- vlan_start: null
- name: public
- vlan_start: 120
- - cidr: 192.168.0.0/24
- gateway: null
- ip_ranges:
- - - 192.168.0.2
- - 192.168.0.254
- meta:
- assign_vip: true
- cidr: 192.168.0.0/24
- configurable: true
- map_priority: 2
- name: management
- notation: cidr
- render_addr_mask: internal
- render_type: cidr
- use_gateway: false
- vlan_start: 101
- name: management
- vlan_start: 320
- - cidr: 192.168.1.0/24
- gateway: null
- ip_ranges:
- - - 192.168.1.2
- - 192.168.1.254
- meta:
- assign_vip: false
- cidr: 192.168.1.0/24
- configurable: true
- map_priority: 2
- name: storage
- notation: cidr
- render_addr_mask: storage
- render_type: cidr
- use_gateway: false
- vlan_start: 102
- name: storage
- vlan_start: 220
- - cidr: null
- gateway: null
- ip_ranges: []
- meta:
- assign_vip: false
- configurable: false
- map_priority: 2
- name: private
- neutron_vlan_range: true
- notation: null
- render_addr_mask: null
- render_type: null
- seg_type: vlan
- use_gateway: false
- vlan_start: null
- name: private
- vlan_start: null
- - cidr: 10.40.0.0/24
- gateway: null
- ip_ranges:
- - - 10.40.0.3
- - 10.40.0.254
- meta:
- assign_vip: false
- configurable: false
- map_priority: 0
- notation: ip_ranges
- render_addr_mask: null
- render_type: null
- unmovable: true
- use_gateway: true
- name: fuelweb_admin
- vlan_start: null
-settings:
- editable:
- access:
- email:
- description: Email address for Administrator
- label: email
- type: text
- value: admin@localhost
- weight: 40
- metadata:
- label: Access
- weight: 10
- password:
- description: Password for Administrator
- label: password
- type: password
- value: admin
- weight: 20
- tenant:
- description: Tenant (project) name for Administrator
- label: tenant
- regex:
- error: Invalid tenant name
- source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
- type: text
- value: admin
- weight: 30
- user:
- description: Username for Administrator
- label: username
- regex:
- error: Invalid username
- source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
- type: text
- value: admin
- weight: 10
- additional_components:
- ceilometer:
- description: If selected, Ceilometer component will be installed
- label: Install Ceilometer
- type: checkbox
- value: false
- weight: 40
- heat:
- description: ''
- label: ''
- type: hidden
- value: true
- weight: 30
- metadata:
- label: Additional Components
- weight: 20
- murano:
- description: If selected, Murano component will be installed
- label: Install Murano
- restrictions:
- - cluster:net_provider != 'neutron'
- type: checkbox
- value: false
- weight: 20
- sahara:
- description: If selected, Sahara component will be installed
- label: Install Sahara
- type: checkbox
- value: false
- weight: 10
- common:
- auth_key:
- description: Public key(s) to include in authorized_keys on deployed nodes
- label: Public Key
- type: text
- value: ''
- weight: 70
- auto_assign_floating_ip:
- description: If selected, OpenStack will automatically assign a floating IP
- to a new instance
- label: Auto assign floating IP
- restrictions:
- - cluster:net_provider == 'neutron'
- type: checkbox
- value: false
- weight: 40
- compute_scheduler_driver:
- label: Scheduler driver
- type: radio
- value: nova.scheduler.filter_scheduler.FilterScheduler
- values:
- - data: nova.scheduler.filter_scheduler.FilterScheduler
- description: Currently the most advanced OpenStack scheduler. See the OpenStack
- documentation for details.
- label: Filter scheduler
- - data: nova.scheduler.simple.SimpleScheduler
- description: This is 'naive' scheduler which tries to find the least loaded
- host
- label: Simple scheduler
- weight: 40
- debug:
- description: Debug logging mode provides more information, but requires more
- disk space.
- label: OpenStack debug logging
- type: checkbox
- value: false
- weight: 20
- disable_offload:
- description: If set, generic segmentation offload (gso) and generic receive
- offload (gro) on physical nics will be disabled. See ethtool man.
- label: Disable generic offload on physical nics
- restrictions:
- - action: hide
- condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
- == 'gre'
- type: checkbox
- value: true
- weight: 80
- libvirt_type:
- label: Hypervisor type
- type: radio
- value: kvm
- values:
- - data: kvm
- description: Choose this type of hypervisor if you run OpenStack on hardware
- label: KVM
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- - data: qemu
- description: Choose this type of hypervisor if you run OpenStack on virtual
- hosts.
- label: QEMU
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- - data: vcenter
- description: Choose this type of hypervisor if you run OpenStack in a vCenter
- environment.
- label: vCenter
- restrictions:
- - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
- == 'neutron'
- weight: 30
- metadata:
- label: Common
- weight: 30
- nova_quota:
- description: Quotas are used to limit CPU and memory usage for tenants. Enabling
- quotas will increase load on the Nova database.
- label: Nova quotas
- type: checkbox
- value: false
- weight: 25
- resume_guests_state_on_host_boot:
- description: Whether to resume previous guests state when the host reboots.
- If enabled, this option causes guests assigned to the host to resume their
- previous state. If the guest was running a restart will be attempted when
- nova-compute starts. If the guest was not running previously, a restart
- will not be attempted.
- label: Resume guests state on host boot
- type: checkbox
- value: true
- weight: 60
- use_cow_images:
- description: For most cases you will want qcow format. If it's disabled, raw
- image format will be used to run VMs. OpenStack with raw format currently
- does not support snapshotting.
- label: Use qcow format for images
- type: checkbox
- value: true
- weight: 50
- corosync:
- group:
- description: ''
- label: Group
- type: text
- value: 226.94.1.1
- weight: 10
- metadata:
- label: Corosync
- restrictions:
- - action: hide
- condition: 'true'
- weight: 50
- port:
- description: ''
- label: Port
- type: text
- value: '12000'
- weight: 20
- verified:
- description: Set True only if multicast is configured correctly on router.
- label: Need to pass network verification.
- type: checkbox
- value: false
- weight: 10
- external_dns:
- dns_list:
- description: List of upstream DNS servers, separated by comma
- label: DNS list
- type: text
- value: 10.118.32.193, 8.8.8.8
- weight: 10
- metadata:
- label: Upstream DNS
- weight: 90
- external_ntp:
- metadata:
- label: Upstream NTP
- weight: 100
- ntp_list:
- description: List of upstream NTP servers, separated by comma
- label: NTP servers list
- type: text
- value: 0.pool.ntp.org, 1.pool.ntp.org
- weight: 10
- kernel_params:
- kernel:
- description: Default kernel parameters
- label: Initial parameters
- type: text
- value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
- weight: 45
- metadata:
- label: Kernel parameters
- weight: 40
- neutron_mellanox:
- metadata:
- enabled: true
- label: Mellanox Neutron components
- toggleable: false
- weight: 50
- plugin:
- label: Mellanox drivers and SR-IOV plugin
- type: radio
- value: disabled
- values:
- - data: disabled
- description: If selected, Mellanox drivers, Neutron and Cinder plugin will
- not be installed.
- label: Mellanox drivers and plugins disabled
- restrictions:
- - settings:storage.iser.value == true
- - data: drivers_only
- description: If selected, Mellanox Ethernet drivers will be installed to
- support networking over Mellanox NIC. Mellanox Neutron plugin will not
- be installed.
- label: Install only Mellanox drivers
- restrictions:
- - settings:common.libvirt_type.value != 'kvm'
- - data: ethernet
- description: If selected, both Mellanox Ethernet drivers and Mellanox network
- acceleration (Neutron) plugin will be installed.
- label: Install Mellanox drivers and SR-IOV plugin
- restrictions:
- - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
- == 'neutron' and networking_parameters:segmentation_type == 'vlan')
- weight: 60
- vf_num:
- description: Note that one virtual function will be reserved to the storage
- network, in case of choosing iSER.
- label: Number of virtual NICs
- restrictions:
- - settings:neutron_mellanox.plugin.value != 'ethernet'
- type: text
- value: '16'
- weight: 70
- nsx_plugin:
- connector_type:
- description: Default network transport type to use
- label: NSX connector type
- type: select
- value: stt
- values:
- - data: gre
- label: GRE
- - data: ipsec_gre
- label: GRE over IPSec
- - data: stt
- label: STT
- - data: ipsec_stt
- label: STT over IPSec
- - data: bridge
- label: Bridge
- weight: 80
- l3_gw_service_uuid:
- description: UUID for the default L3 gateway service to use with this cluster
- label: L3 service UUID
- regex:
- error: Invalid L3 gateway service UUID
- source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
- type: text
- value: ''
- weight: 50
- metadata:
- enabled: false
- label: VMware NSX
- restrictions:
- - action: hide
- condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
- != 'nsx'
- weight: 20
- nsx_controllers:
- description: One or more IPv4[:port] addresses of NSX controller node, separated
- by comma (e.g. 10.40.30.2,192.168.110.254:443)
- label: NSX controller endpoint
- regex:
- error: Invalid controller endpoints, specify valid IPv4[:port] pair
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
- type: text
- value: ''
- weight: 60
- nsx_password:
- description: Password for Administrator
- label: NSX password
- regex:
- error: Empty password
- source: \S
- type: password
- value: ''
- weight: 30
- nsx_username:
- description: NSX administrator's username
- label: NSX username
- regex:
- error: Empty username
- source: \S
- type: text
- value: admin
- weight: 20
- packages_url:
- description: URL to NSX specific packages
- label: URL to NSX bits
- regex:
- error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
- http://10.20.0.2/nsx)
- source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
- type: text
- value: ''
- weight: 70
- replication_mode:
- description: ''
- label: NSX cluster has Service nodes
- type: checkbox
- value: true
- weight: 90
- transport_zone_uuid:
- description: UUID of the pre-existing default NSX Transport zone
- label: Transport zone UUID
- regex:
- error: Invalid transport zone UUID
- source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
- type: text
- value: ''
- weight: 40
- provision:
- metadata:
- label: Provision
- restrictions:
- - action: hide
- condition: not ('experimental' in version:feature_groups)
- weight: 80
- method:
- description: Which provision method to use for this cluster.
- label: Provision method
- type: radio
- value: cobbler
- values:
- - data: image
- description: Copying pre-built images on a disk.
- label: Image
- - data: cobbler
- description: Install from scratch using anaconda or debian-installer.
- label: Classic (use anaconda or debian-installer)
- public_network_assignment:
- assign_to_all_nodes:
- description: When disabled, public network will be assigned to controllers
- and zabbix-server only
- label: Assign public network to all nodes
- type: checkbox
- value: false
- weight: 10
- metadata:
- label: Public network assignment
- restrictions:
- - action: hide
- condition: cluster:net_provider != 'neutron'
- weight: 50
- storage:
- ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works
- best if Ceph is enabled for volumes and images, too. Enables live migration
- of all types of Ceph backed VMs (without this option, live migration will
- only work with VMs launched from Cinder volumes).
- label: Ceph RBD for ephemeral volumes (Nova)
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- type: checkbox
- value: false
- weight: 75
- images_ceph:
- description: Configures Glance to use the Ceph RBD backend to store images.
- If enabled, this option will prevent Swift from installing.
- label: Ceph RBD for images (Glance)
- type: checkbox
- value: false
- weight: 30
- images_vcenter:
- description: Configures Glance to use the vCenter/ESXi backend to store images.
- If enabled, this option will prevent Swift from installing.
- label: VMWare vCenter/ESXi datastore for images (Glance)
- restrictions:
- - settings:common.libvirt_type.value != 'vcenter'
- type: checkbox
- value: false
- weight: 35
- iser:
- description: 'High performance block storage: Cinder volumes over iSER protocol
- (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
- and will use a dedicated virtual function for the storage network.'
- label: iSER protocol for volumes (Cinder)
- restrictions:
- - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
- != 'kvm'
- type: checkbox
- value: false
- weight: 11
- metadata:
- label: Storage
- weight: 60
- objects_ceph:
- description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
- Swift API Interfaces. If enabled, this option will prevent Swift from installing.
- label: Ceph RadosGW for objects (Swift API)
- restrictions:
- - settings:storage.images_ceph.value == false
- type: checkbox
- value: false
- weight: 80
- osd_pool_size:
- description: Configures the default number of object replicas in Ceph. This
- number must be equal to or lower than the number of deployed 'Storage -
- Ceph OSD' nodes.
- label: Ceph object replication factor
- regex:
- error: Invalid number
- source: ^[1-9]\d*$
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- type: text
- value: '2'
- weight: 85
- vc_datacenter:
- description: Inventory path to a datacenter. If you want to use ESXi host
- as datastore, it should be "ha-datacenter".
- label: Datacenter name
- regex:
- error: Empty datacenter
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 65
- vc_datastore:
- description: Datastore associated with the datacenter.
- label: Datastore name
- regex:
- error: Empty datastore
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 60
- vc_host:
- description: IP Address of vCenter/ESXi
- label: vCenter/ESXi IP
- regex:
- error: Specify valid IPv4 address
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 45
- vc_image_dir:
- description: The name of the directory where the glance images will be stored
- in the VMware datastore.
- label: Datastore Images directory
- regex:
- error: Empty images directory
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: /openstack_glance
- weight: 70
- vc_password:
- description: vCenter/ESXi admin password
- label: Password
- regex:
- error: Empty password
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: password
- value: ''
- weight: 55
- vc_user:
- description: vCenter/ESXi admin username
- label: Username
- regex:
- error: Empty username
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 50
- volumes_ceph:
- description: Configures Cinder to store volumes in Ceph RBD images.
- label: Ceph RBD for volumes (Cinder)
- restrictions:
- - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
- == 'vcenter'
- type: checkbox
- value: false
- weight: 20
- volumes_lvm:
- description: Requires at least one Storage - Cinder LVM node.
- label: Cinder LVM over iSCSI for volumes
- restrictions:
- - settings:storage.volumes_ceph.value == true
- type: checkbox
- value: false
- weight: 10
- volumes_vmdk:
- description: Configures Cinder to store volumes via VMware vCenter.
- label: VMware vCenter for volumes (Cinder)
- restrictions:
- - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
- == true
- type: checkbox
- value: false
- weight: 15
- syslog:
- metadata:
- label: Syslog
- weight: 50
- syslog_port:
- description: Remote syslog port
- label: Port
- regex:
- error: Invalid Syslog port
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '514'
- weight: 20
- syslog_server:
- description: Remote syslog hostname
- label: Hostname
- type: text
- value: ''
- weight: 10
- syslog_transport:
- label: Syslog transport protocol
- type: radio
- value: tcp
- values:
- - data: udp
- description: ''
- label: UDP
- - data: tcp
- description: ''
- label: TCP
- weight: 30
- vcenter:
- cluster:
- description: vCenter cluster name. If you have multiple clusters, use comma
- to separate names
- label: Cluster
- regex:
- error: Invalid cluster list
- source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
- type: text
- value: ''
- weight: 40
- datastore_regex:
- description: The Datastore regexp setting specifies the data stores to use
- with Compute. For example, "nas.*". If you want to use all available datastores,
- leave this field blank
- label: Datastore regexp
- regex:
- error: Invalid datastore regexp
- source: ^(\S.*\S|\S|)$
- type: text
- value: ''
- weight: 50
- host_ip:
- description: IP Address of vCenter
- label: vCenter IP
- regex:
- error: Specify valid IPv4 address
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
- type: text
- value: ''
- weight: 10
- metadata:
- label: vCenter
- restrictions:
- - action: hide
- condition: settings:common.libvirt_type.value != 'vcenter'
- weight: 20
- use_vcenter:
- description: ''
- label: ''
- type: hidden
- value: true
- weight: 5
- vc_password:
- description: vCenter admin password
- label: Password
- regex:
- error: Empty password
- source: \S
- type: password
- value: admin
- weight: 30
- vc_user:
- description: vCenter admin username
- label: Username
- regex:
- error: Empty username
- source: \S
- type: text
- value: admin
- weight: 20
- vlan_interface:
- description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
- vmnic1). If empty "vmnic0" is used by default
- label: ESXi VLAN interface
- restrictions:
- - action: hide
- condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
- != 'VlanManager'
- type: text
- value: ''
- weight: 60
- zabbix:
- metadata:
- label: Zabbix Access
- restrictions:
- - action: hide
- condition: not ('experimental' in version:feature_groups)
- weight: 70
- password:
- description: Password for Zabbix Administrator
- label: password
- type: password
- value: zabbix
- weight: 20
- username:
- description: Username for Zabbix Administrator
- label: username
- type: text
- value: admin
- weight: 10
diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml
deleted file mode 100644
index 6240f0794..000000000
--- a/fuel/deploy/baremetal/dha.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Mon May 4 09:03:46 UTC 2015
-comment: Test environment Ericsson Montreal
-
-# Adapter to use for this definition
-adapter: ipmi
-
-# Node list.
-# Mandatory properties are id and role.
-# The MAC address of the PXE boot interface for Fuel is not
-# mandatory to be defined.
-# All other properties are adapter specific.
-
-nodes:
-- id: 1
- pxeMac: 14:58:D0:54:7A:28
- ipmiIp: 10.118.32.205
- ipmiUser: username
- ipmiPass: password
-- id: 2
- pxeMac: 14:58:D0:55:E2:E0
- ipmiIp: 10.118.32.202
- ipmiUser: username
- ipmiPass: password
-# Adding the Fuel node as node id 3 which may not be correct - please
-# adjust as needed.
-- id: 3
- libvirtName: vFuel
- libvirtTemplate: vFuel
- isFuel: yes
- username: root
- password: r00tme
-
-# Deployment power on strategy
-# all: Turn on all nodes at once. There will be no correlation
-# between the DHA and DEA node numbering. MAC addresses
-# will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-# node and wait for the node to be detected by Fuel. Not until
-# the node has been detected and assigned a role will the next
-# node be turned on.
-powerOnStrategy: sequence
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()" with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-fuelCustomInstall: true
-
diff --git a/fuel/deploy/cloud/configure_environment.py b/fuel/deploy/cloud/configure_environment.py
index d0037d729..2d68c1ba8 100644
--- a/fuel/deploy/cloud/configure_environment.py
+++ b/fuel/deploy/cloud/configure_environment.py
@@ -1,6 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
import common
-import os
-import shutil
from configure_settings import ConfigureSettings
from configure_network import ConfigureNetwork
@@ -14,6 +21,9 @@ exec_cmd = common.exec_cmd
parse = common.parse
err = common.err
log = common.log
+delete = common.delete
+create_dir_if_not_exists = common.create_dir_if_not_exists
+
class ConfigureEnvironment(object):
@@ -21,7 +31,6 @@ class ConfigureEnvironment(object):
self.env_id = None
self.dea = dea
self.yaml_config_dir = yaml_config_dir
- self.env_name = self.dea.get_property('environment_name')
self.release_id = release_id
self.node_id_roles_dict = node_id_roles_dict
self.required_networks = []
@@ -36,21 +45,20 @@ class ConfigureEnvironment(object):
def configure_environment(self):
log('Configure environment')
- if os.path.exists(self.yaml_config_dir):
- log('Deleting existing config directory %s' % self.yaml_config_dir)
- shutil.rmtree(self.yaml_config_dir)
- log('Creating new config directory %s' % self.yaml_config_dir)
- os.makedirs(self.yaml_config_dir)
-
- mode = self.dea.get_property('environment_mode')
+ delete(self.yaml_config_dir)
+ create_dir_if_not_exists(self.yaml_config_dir)
+ env_name = self.dea.get_env_name()
+ env_mode = self.dea.get_env_mode()
+ env_net_segment_type = self.dea.get_env_net_segment_type()
log('Creating environment %s release %s, mode %s, network-mode neutron'
- ', net-segment-type vlan' % (self.env_name, self.release_id, mode))
+ ', net-segment-type %s'
+ % (env_name, self.release_id, env_mode, env_net_segment_type))
exec_cmd('fuel env create --name %s --release %s --mode %s '
- '--network-mode neutron --net-segment-type vlan'
- % (self.env_name, self.release_id, mode))
+ '--network-mode neutron --net-segment-type %s'
+ % (env_name, self.release_id, env_mode, env_net_segment_type))
- if not self.env_exists(self.env_name):
- err('Failed to create environment %s' % self.env_name)
+ if not self.env_exists(env_name):
+ err('Failed to create environment %s' % env_name)
self.config_settings()
self.config_network()
self.config_nodes()
@@ -68,6 +76,3 @@ class ConfigureEnvironment(object):
nodes = ConfigureNodes(self.yaml_config_dir, self.env_id,
self.node_id_roles_dict, self.dea)
nodes.config_nodes()
-
-
-
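With the name, mode and segmentation type now all read from the DEA's environment block, the creation step reduces to composing a single Fuel CLI call. A sketch with illustrative values (the real ones come from dea.yaml):

    import common

    env_name, release_id = 'opnfv_env', 2
    env_mode, net_segment_type = 'ha_compact', 'vlan'
    common.exec_cmd('fuel env create --name %s --release %s --mode %s '
                    '--network-mode neutron --net-segment-type %s'
                    % (env_name, release_id, env_mode, net_segment_type))
    # -> fuel env create --name opnfv_env --release 2 --mode ha_compact
    #    --network-mode neutron --net-segment-type vlan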
diff --git a/fuel/deploy/cloud/configure_network.py b/fuel/deploy/cloud/configure_network.py
index 295eb90bd..00278949d 100644
--- a/fuel/deploy/cloud/configure_network.py
+++ b/fuel/deploy/cloud/configure_network.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
import yaml
import io
@@ -11,6 +21,8 @@ parse = common.parse
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+backup = common.backup
+
class ConfigureNetwork(object):
@@ -41,6 +53,7 @@ class ConfigureNetwork(object):
network_yaml = ('%s/network_%s.yaml'
% (self.yaml_config_dir, self.env_id))
check_file_exists(network_yaml)
+ backup(network_yaml)
network_config = self.dea.get_property('network')
@@ -58,4 +71,4 @@ class ConfigureNetwork(object):
network.update(net_id[network['name']])
with io.open(network_yaml, 'w') as stream:
- yaml.dump(network_config, stream, default_flow_style=False)
\ No newline at end of file
+ yaml.dump(network_config, stream, default_flow_style=False)
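This module, like the other configure_* steps, follows a download / backup / edit / upload cycle against the Fuel-generated YAML. The edit step in isolation, a sketch using the same PyYAML idiom (update_yaml_file is an illustrative helper, not part of the codebase):

    import io
    import yaml

    def update_yaml_file(path, updates):
        # Merge a dict of changes into an existing YAML file in place,
        # keeping the block (non-flow) style that Fuel expects.
        with io.open(path) as stream:
            data = yaml.load(stream)
        data.update(updates)
        with io.open(path, 'w') as stream:
            yaml.dump(data, stream, default_flow_style=False)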
diff --git a/fuel/deploy/cloud/configure_nodes.py b/fuel/deploy/cloud/configure_nodes.py
index 4d1315a5c..e76d222c0 100644
--- a/fuel/deploy/cloud/configure_nodes.py
+++ b/fuel/deploy/cloud/configure_nodes.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
import yaml
import io
@@ -12,6 +22,7 @@ parse = common.parse
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+backup = common.backup
class ConfigureNodes(object):
@@ -26,7 +37,7 @@ class ConfigureNodes(object):
log('Configure nodes')
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
exec_cmd('fuel node set --node-id %s --role %s --env %s'
- % (node_id, ','.join(roles_blade[0]), self.env_id))
+ % (node_id, roles_blade[0], self.env_id))
self.download_deployment_config()
for node_id, roles_blade in self.node_id_roles_dict.iteritems():
@@ -37,22 +48,20 @@ class ConfigureNodes(object):
self.upload_deployment_config()
def modify_node_network_schemes(self, node_id, roles_blade):
- log('Modify node network transformations in environment %s'
- % self.env_id)
+ log('Modify network transformations for node %s' % node_id)
type = self.dea.get_node_property(roles_blade[1], 'transformations')
- transformations = self.dea.get_transformations(type)
-
- for node_file in glob.glob('%s/deployment_%s/*_%s.yaml'
- % (self.yaml_config_dir, self.env_id,
- node_id)):
+ transformations = self.dea.get_property(type)
+ deployment_dir = '%s/deployment_%s' % (
+ self.yaml_config_dir, self.env_id)
+ backup(deployment_dir)
+ for node_file in glob.glob(deployment_dir + '/*_%s.yaml' % node_id):
with io.open(node_file) as stream:
- node = yaml.load(stream)
+ node = yaml.load(stream)
- node['network_scheme']['transformations'] = transformations
+ node['network_scheme'].update(transformations)
with io.open(node_file, 'w') as stream:
- yaml.dump(node, stream, default_flow_style=False)
-
+ yaml.dump(node, stream, default_flow_style=False)
def download_deployment_config(self):
log('Download deployment config for environment %s' % self.env_id)
@@ -79,6 +88,7 @@ class ConfigureNodes(object):
interface_yaml = ('%s/node_%s/interfaces.yaml'
% (self.yaml_config_dir, node_id))
check_file_exists(interface_yaml)
+ backup('%s/node_%s' % (self.yaml_config_dir, node_id))
with io.open(interface_yaml) as stream:
interfaces = yaml.load(stream)
@@ -86,10 +96,10 @@ class ConfigureNodes(object):
net_name_id = {}
for interface in interfaces:
for network in interface['assigned_networks']:
- net_name_id[network['name']] = network['id']
+ net_name_id[network['name']] = network['id']
type = self.dea.get_node_property(roles_blade[1], 'interfaces')
- interface_config = self.dea.get_interfaces(type)
+ interface_config = self.dea.get_property(type)
for interface in interfaces:
interface['assigned_networks'] = []
@@ -101,4 +111,4 @@ class ConfigureNodes(object):
interface['assigned_networks'].append(net)
with io.open(interface_yaml, 'w') as stream:
- yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
+ yaml.dump(interfaces, stream, default_flow_style=False)
diff --git a/fuel/deploy/cloud/configure_settings.py b/fuel/deploy/cloud/configure_settings.py
index ac0afdc57..fa918fd3d 100644
--- a/fuel/deploy/cloud/configure_settings.py
+++ b/fuel/deploy/cloud/configure_settings.py
@@ -1,3 +1,12 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
import common
import yaml
import io
@@ -11,6 +20,8 @@ parse = common.parse
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+backup = common.backup
+
class ConfigureSettings(object):
@@ -40,6 +51,7 @@ class ConfigureSettings(object):
settings_yaml = ('%s/settings_%s.yaml'
% (self.yaml_config_dir, self.env_id))
check_file_exists(settings_yaml)
+ backup(settings_yaml)
settings = self.dea.get_property('settings')
diff --git a/fuel/deploy/cloud/deploy.py b/fuel/deploy/cloud/deploy.py
index c8714f8a6..705dda59c 100644
--- a/fuel/deploy/cloud/deploy.py
+++ b/fuel/deploy/cloud/deploy.py
@@ -1,7 +1,17 @@
-import time
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+import os
import yaml
import io
-import sys
+import glob
import common
from dea import DeploymentEnvironmentAdapter
@@ -19,188 +29,79 @@ parse = common.parse
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+commafy = common.commafy
+ArgParser = common.ArgParser
+
class Deploy(object):
- def __init__(self, dea_file, macs_file):
+ def __init__(self, dea_file, blade_node_file, no_health_check):
self.dea = DeploymentEnvironmentAdapter(dea_file)
- self.macs_file = macs_file
+ self.blade_node_file = blade_node_file
+ self.no_health_check = no_health_check
self.macs_per_blade = {}
self.blades = self.dea.get_node_ids()
- self.node_ids_dict = {}
- self.node_id_roles_dict = {}
- self.supported_release = None
+ self.blade_node_dict = {}
+ self.node_roles_dict = {}
self.env_id = None
- self.wanted_release = self.dea.get_wanted_release()
-
- def cleanup_fuel_environments(self, env_list):
- WAIT_LOOP = 60
- SLEEP_TIME = 10
- for env in env_list:
- log('Deleting environment %s' % env[E['id']])
- exec_cmd('fuel env --env %s --delete' % env[E['id']])
- all_env_erased = False
- for i in range(WAIT_LOOP):
- env_list = parse(exec_cmd('fuel env list'))
- if env_list:
- time.sleep(SLEEP_TIME)
- else:
- all_env_erased = True
- break
- if not all_env_erased:
- err('Could not erase these environments %s'
- % [(env[E['id']], env[E['status']]) for env in env_list])
-
- def cleanup_fuel_nodes(self, node_list):
- for node in node_list:
- if node[N['status']] == 'discover':
- log('Deleting node %s' % node[N['id']])
- exec_cmd('fuel node --node-id %s --delete-from-db'
- % node[N['id']])
- exec_cmd('dockerctl shell cobbler cobbler system remove '
- '--name node-%s' % node[N['id']])
-
- def check_previous_installation(self):
- log('Check previous installation')
- env_list = parse(exec_cmd('fuel env list'))
- if env_list:
- self.cleanup_fuel_environments(env_list)
- node_list = parse(exec_cmd('fuel node list'))
- if node_list:
- self.cleanup_fuel_nodes(node_list)
-
- def check_supported_release(self):
- log('Check supported release: %s' % self.wanted_release)
- release_list = parse(exec_cmd('fuel release -l'))
- for release in release_list:
- if release[R['name']] == self.wanted_release:
- self.supported_release = release
- break
- if not self.supported_release:
- err('This Fuel does not contain the following release: %s'
- % self.wanted_release)
-
- def check_prerequisites(self):
- log('Check prerequisites')
- self.check_supported_release()
- self.check_previous_installation()
-
- def get_mac_addresses(self):
- with io.open(self.macs_file, 'r') as stream:
- self.macs_per_blade = yaml.load(stream)
-
- def find_mac_in_dict(self, mac):
- for blade, mac_list in self.macs_per_blade.iteritems():
- if mac in mac_list:
- return blade
-
- def all_blades_discovered(self):
- for blade, node_id in self.node_ids_dict.iteritems():
- if not node_id:
- return False
- return True
-
- def not_discovered_blades_summary(self):
- summary = ''
- for blade, node_id in self.node_ids_dict.iteritems():
- if not node_id:
- summary += '\n[blade %s]' % blade
- return summary
-
- def node_discovery(self, node_list, discovered_macs):
- for node in node_list:
- if (node[N['status']] == 'discover' and
- node[N['online']] == 'True' and
- node[N['mac']] not in discovered_macs):
- discovered_macs.append(node[N['mac']])
- blade = self.find_mac_in_dict(node[N['mac']])
- if blade:
- log('Blade %s discovered as Node %s with MAC %s'
- % (blade, node[N['id']], node[N['mac']]))
- self.node_ids_dict[blade] = node[N['id']]
-
- def discovery_waiting_loop(self, discovered_macs):
- WAIT_LOOP = 180
- SLEEP_TIME = 10
- all_discovered = False
- for i in range(WAIT_LOOP):
- node_list = parse(exec_cmd('fuel node list'))
- if node_list:
- self.node_discovery(node_list, discovered_macs)
- if self.all_blades_discovered():
- all_discovered = True
- break
- else:
- time.sleep(SLEEP_TIME)
- return all_discovered
-
- def wait_for_discovered_blades(self):
- log('Wait for discovered blades')
- discovered_macs = []
- for blade in self.blades:
- self.node_ids_dict[blade] = None
- all_discovered = self.discovery_waiting_loop(discovered_macs)
- if not all_discovered:
- err('Not all blades have been discovered: %s'
- % self.not_discovered_blades_summary())
+ self.wanted_release = self.dea.get_property('wanted_release')
+
+ def get_blade_node_mapping(self):
+ with io.open(self.blade_node_file, 'r') as stream:
+ self.blade_node_dict = yaml.load(stream)
def assign_roles_to_cluster_node_ids(self):
- self.node_id_roles_dict = {}
- for blade, node_id in self.node_ids_dict.iteritems():
- role_list = []
- role = self.dea.get_node_role(blade)
- if role == 'controller':
- role_list.extend(['controller', 'mongo'])
- elif role == 'compute':
- role_list.extend(['compute'])
- self.node_id_roles_dict[node_id] = (role_list, blade)
+ self.node_roles_dict = {}
+ for blade, node in self.blade_node_dict.iteritems():
+ roles = commafy(self.dea.get_node_role(blade))
+ self.node_roles_dict[node] = (roles, blade)
def configure_environment(self):
+ release_list = parse(exec_cmd('fuel release -l'))
+ for release in release_list:
+ if release[R['name']] == self.wanted_release:
+ break
config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
- self.supported_release[R['id']],
- self.node_id_roles_dict)
+ release[R['id']],
+ self.node_roles_dict)
config_env.configure_environment()
self.env_id = config_env.env_id
def deploy_cloud(self):
dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
- self.node_id_roles_dict)
+ self.node_roles_dict, self.no_health_check)
dep.deploy()
def deploy(self):
- self.get_mac_addresses()
- self.check_prerequisites()
- self.wait_for_discovered_blades()
+
+ self.get_blade_node_mapping()
+
self.assign_roles_to_cluster_node_ids()
+
self.configure_environment()
- self.deploy_cloud()
-def usage():
- print '''
- Usage:
- python deploy.py <dea_file> <macs_file>
+ self.deploy_cloud()
- Example:
- python deploy.py dea.yaml macs.yaml
- '''
def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- dea_file = sys.argv[-2]
- macs_file = sys.argv[-1]
- check_file_exists(dea_file)
- check_file_exists(macs_file)
- return dea_file, macs_file
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('-nh', dest='no_health_check', action='store_true',
+ default=False,
+ help='Don\'t run health check after deployment')
+ parser.add_argument('dea_file', action='store',
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('blade_node_file', action='store',
+ help='Blade Node mapping: blade_node.yaml')
+ args = parser.parse_args()
+ check_file_exists(args.dea_file)
+ check_file_exists(args.blade_node_file)
+ return (args.dea_file, args.blade_node_file, args.no_health_check)
-def main():
- dea_file, macs_file = parse_arguments()
-
- deploy = Deploy(dea_file, macs_file)
+def main():
+ dea_file, blade_node_file, no_health_check = parse_arguments()
+ deploy = Deploy(dea_file, blade_node_file, no_health_check)
deploy.deploy()
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
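Role assignment now keys off the blade_node.yaml mapping produced during discovery instead of raw MAC lists. The data flow, condensed with illustrative values:

    import common

    # blade_node.yaml maps DHA blade ids to Fuel node ids:
    blade_node_dict = {1: 7, 2: 8}
    blade_roles = {1: 'controller, mongo', 2: 'compute'}  # from dea.yaml

    node_roles_dict = {}
    for blade, node in blade_node_dict.iteritems():
        node_roles_dict[node] = (common.commafy(blade_roles[blade]), blade)
    # -> {7: ('controller,mongo', 1), 8: ('compute', 2)}, feeding
    #    'fuel node set --node-id 7 --role controller,mongo --env <id>'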
diff --git a/fuel/deploy/cloud/deployment.py b/fuel/deploy/cloud/deployment.py
index cf56c3630..90f24fd0b 100644
--- a/fuel/deploy/cloud/deployment.py
+++ b/fuel/deploy/cloud/deployment.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
import os
import shutil
@@ -19,42 +29,13 @@ log = common.log
class Deployment(object):
- def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
+ def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict,
+ no_health_check):
self.dea = dea
self.yaml_config_dir = yaml_config_dir
self.env_id = env_id
self.node_id_roles_dict = node_id_roles_dict
-
- def download_deployment_info(self):
- log('Download deployment info for environment %s' % self.env_id)
- deployment_dir = '%s/deployment_%s' \
- % (self.yaml_config_dir, self.env_id)
- if os.path.exists(deployment_dir):
- shutil.rmtree(deployment_dir)
- exec_cmd('fuel --env %s deployment --default --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def upload_deployment_info(self):
- log('Upload deployment info for environment %s' % self.env_id)
- exec_cmd('fuel --env %s deployment --upload --dir %s'
- % (self.env_id, self.yaml_config_dir))
-
- def config_opnfv(self):
- log('Configure OPNFV settings on environment %s' % self.env_id)
- opnfv_compute = self.dea.get_opnfv('compute')
- opnfv_controller = self.dea.get_opnfv('controller')
- self.download_deployment_info()
- for node_file in glob.glob('%s/deployment_%s/*.yaml'
- % (self.yaml_config_dir, self.env_id)):
- with io.open(node_file) as stream:
- node = yaml.load(stream)
- if node['role'] == 'compute':
- node.update(opnfv_compute)
- else:
- node.update(opnfv_controller)
- with io.open(node_file, 'w') as stream:
- yaml.dump(node, stream, default_flow_style=False)
- self.upload_deployment_info()
+ self.no_health_check = no_health_check
def run_deploy(self):
WAIT_LOOP = 180
@@ -75,7 +56,8 @@ class Deployment(object):
if env[0][E['status']] == 'operational':
ready = True
break
- elif env[0][E['status']] == 'error':
+ elif (env[0][E['status']] == 'error'
+ or env[0][E['status']] == 'stopped'):
break
else:
time.sleep(SLEEP_TIME)
@@ -102,12 +84,14 @@ class Deployment(object):
def health_check(self):
log('Now running sanity and smoke health checks')
- exec_cmd('fuel health --env %s --check sanity,smoke --force'
- % self.env_id)
- log('Health checks passed !')
+ r = exec_cmd('fuel health --env %s --check sanity,smoke --force'
+ % self.env_id)
+ log(r)
+ if 'failure' in r:
+ err('Healthcheck failed!')
def deploy(self):
- self.config_opnfv()
self.run_deploy()
self.verify_node_status()
- self.health_check()
\ No newline at end of file
+ if not self.no_health_check:
+ self.health_check()
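run_deploy()'s wait loop now treats 'stopped' as terminal alongside 'error'. Stripped of the CLI plumbing, the polling logic is the following sketch (get_env_status stands in for parsing 'fuel env'; the timing defaults are illustrative):

    import time

    def wait_until_operational(get_env_status, wait_loop=180, sleep_time=10):
        # Poll the environment status until it is operational; give up
        # early on a terminal 'error' or 'stopped' state.
        for _ in range(wait_loop):
            status = get_env_status()
            if status == 'operational':
                return True
            if status in ('error', 'stopped'):
                return False
            time.sleep(sleep_time)
        return False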
diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py
index 6dbda67f3..2a8c0d149 100644
--- a/fuel/deploy/common.py
+++ b/fuel/deploy/common.py
@@ -1,7 +1,20 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
import subprocess
import sys
import os
import logging
+import argparse
+import shutil
+import stat
+import errno
N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5,
'roles': 6, 'pending_roles': 7, 'online': 8}
@@ -9,7 +22,7 @@ E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4,
'changes': 5, 'pending_release_id': 6}
R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4}
RO = {'name': 0, 'conflicts': 1}
-
+CWD = os.getcwd()
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
@@ -19,6 +32,7 @@ LOG.addHandler(out_handler)
out_handler = logging.FileHandler('autodeploy.log', mode='w')
out_handler.setFormatter(formatter)
LOG.addHandler(out_handler)
+os.chmod('autodeploy.log', stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def exec_cmd(cmd, check=True):
process = subprocess.Popen(cmd,
@@ -34,6 +48,7 @@ def exec_cmd(cmd, check=True):
return response
return response, return_code
+
def run_proc(cmd):
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
@@ -41,14 +56,16 @@ def run_proc(cmd):
shell=True)
return process
+
def parse(printout):
parsed_list = []
lines = printout.splitlines()
for l in lines[2:]:
- parsed = [e.strip() for e in l.split('|')]
- parsed_list.append(parsed)
+ parsed = [e.strip() for e in l.split('|')]
+ parsed_list.append(parsed)
return parsed_list
+
def clean(lines):
parsed_list = []
parsed = []
@@ -61,22 +78,76 @@ def clean(lines):
parsed_list.append(parsed)
return parsed if len(parsed_list) == 1 else parsed_list
+
def err(message):
LOG.error('%s\n' % message)
sys.exit(1)
+
+def warn(message):
+ LOG.warning('%s\n' % message)
+
+
def check_file_exists(file_path):
+ if not os.path.dirname(file_path):
+ file_path = '%s/%s' % (CWD, file_path)
if not os.path.isfile(file_path):
err('ERROR: File %s not found\n' % file_path)
+
def check_dir_exists(dir_path):
+ if not os.path.dirname(dir_path):
+ dir_path = '%s/%s' % (CWD, dir_path)
if not os.path.isdir(dir_path):
err('ERROR: Directory %s not found\n' % dir_path)
+
+def create_dir_if_not_exists(dir_path):
+ if not os.path.isdir(dir_path):
+ log('Creating directory %s' % dir_path)
+ os.makedirs(dir_path)
+
+
+def delete(f):
+ if os.path.isfile(f):
+ log('Deleting file %s' % f)
+ os.remove(f)
+ elif os.path.isdir(f):
+ log('Deleting directory %s' % f)
+ shutil.rmtree(f)
+
+
+def commafy(comma_separated_list):
+ l = [c.strip() for c in comma_separated_list.split(',')]
+ return ','.join(l)
+
+
def check_if_root():
r = exec_cmd('whoami')
if r != 'root':
err('You need to be root to run this application')
+
def log(message):
LOG.debug('%s\n' % message)
+
+
+class ArgParser(argparse.ArgumentParser):
+
+ def error(self, message):
+ sys.stderr.write('ERROR: %s\n' % message)
+ self.print_help()
+ sys.exit(2)
+
+
+def backup(path):
+ src = path
+ dst = path + '_orig'
+ delete(dst)
+ try:
+ shutil.copytree(src, dst)
+ except OSError as e:
+ if e.errno == errno.ENOTDIR:
+ shutil.copy(src, dst)
+ else:
+ raise
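backup() covers both files and directories: copytree() raises ENOTDIR for a plain file and the except clause falls back to copy(). Usage as the configure_* modules rely on it (paths illustrative):

    import common

    common.backup('deployment_1')        # dir  -> snapshot deployment_1_orig
    common.backup('settings_1.yaml')     # file -> snapshot settings_1.yaml_orig
    common.commafy('controller, mongo')  # -> 'controller,mongo'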
diff --git a/fuel/deploy/dea.py b/fuel/deploy/dea.py
index 8066b6ae2..5f1a41547 100644
--- a/fuel/deploy/dea.py
+++ b/fuel/deploy/dea.py
@@ -1,8 +1,20 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import yaml
import io
import netaddr
+
class DeploymentEnvironmentAdapter(object):
+
def __init__(self, yaml_path):
self.dea_struct = None
self.parse_yaml(yaml_path)
@@ -19,6 +31,15 @@ class DeploymentEnvironmentAdapter(object):
with io.open(yaml_path) as yaml_file:
self.dea_struct = yaml.load(yaml_file)
+ def get_env_name(self):
+ return self.get_property('environment')['name']
+
+ def get_env_mode(self):
+ return self.get_property('environment')['mode']
+
+ def get_env_net_segment_type(self):
+ return self.get_property('environment')['net_segment_type']
+
def get_fuel_config(self):
return self.dea_struct['fuel']
@@ -67,14 +88,12 @@ class DeploymentEnvironmentAdapter(object):
def get_network_names(self):
return self.network_names
- def get_interfaces(self, type):
- return self.dea_struct['interfaces'][type]
-
- def get_transformations(self, type):
- return self.dea_struct['transformations'][type]
-
- def get_opnfv(self, role):
- return {'opnfv': self.dea_struct['opnfv'][role]}
+ def get_dns_list(self):
+ settings = self.get_property('settings')
+ dns_list = settings['editable']['external_dns']['dns_list']['value']
+ return [d.strip() for d in dns_list.split(',')]
- def get_wanted_release(self):
- return self.dea_struct['wanted_release']
\ No newline at end of file
+ def get_ntp_list(self):
+ settings = self.get_property('settings')
+ ntp_list = settings['editable']['external_ntp']['ntp_list']['value']
+ return [n.strip() for n in ntp_list.split(',')]
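The new get_dns_list()/get_ntp_list() helpers just split the comma-separated strings stored under settings/editable. With an illustrative dea.yaml fragment:

    # Given dea.yaml containing:
    #   settings:
    #     editable:
    #       external_dns:
    #         dns_list:
    #           value: '8.8.8.8, 8.8.4.4'
    from dea import DeploymentEnvironmentAdapter

    dea = DeploymentEnvironmentAdapter('dea.yaml')
    dea.get_dns_list()   # -> ['8.8.8.8', '8.8.4.4']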
diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py
index 9d1a3d2c3..178ae76e2 100644
--- a/fuel/deploy/deploy.py
+++ b/fuel/deploy/deploy.py
@@ -1,33 +1,51 @@
-import sys
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import os
-import shutil
import io
import re
+import sys
import netaddr
+import yaml
from dea import DeploymentEnvironmentAdapter
from dha import DeploymentHardwareAdapter
from install_fuel_master import InstallFuelMaster
from deploy_env import CloudDeploy
+from execution_environment import ExecutionEnvironment
import common
log = common.log
exec_cmd = common.exec_cmd
err = common.err
+warn = common.warn
check_file_exists = common.check_file_exists
+check_dir_exists = common.check_dir_exists
+create_dir_if_not_exists = common.create_dir_if_not_exists
+delete = common.delete
check_if_root = common.check_if_root
+ArgParser = common.ArgParser
FUEL_VM = 'fuel'
-TMP_DIR = '%s/fueltmp' % os.getenv('HOME')
PATCH_DIR = 'fuel_patch'
-WORK_DIR = 'deploy'
+WORK_DIR = '~/deploy'
+CWD = os.getcwd()
+
class cd:
+
def __init__(self, new_path):
self.new_path = os.path.expanduser(new_path)
def __enter__(self):
- self.saved_path = os.getcwd()
+ self.saved_path = CWD
os.chdir(self.new_path)
def __exit__(self, etype, value, traceback):
@@ -36,31 +54,27 @@ class cd:
class AutoDeploy(object):
- def __init__(self, without_fuel, iso_file, dea_file, dha_file):
- self.without_fuel = without_fuel
+ def __init__(self, no_fuel, fuel_only, no_health_check, cleanup_only,
+ cleanup, storage_dir, pxe_bridge, iso_file, dea_file,
+ dha_file, fuel_plugins_dir):
+ self.no_fuel = no_fuel
+ self.fuel_only = fuel_only
+ self.no_health_check = no_health_check
+ self.cleanup_only = cleanup_only
+ self.cleanup = cleanup
+ self.storage_dir = storage_dir
+ self.pxe_bridge = pxe_bridge
self.iso_file = iso_file
self.dea_file = dea_file
self.dha_file = dha_file
- self.dea = DeploymentEnvironmentAdapter(dea_file)
+ self.fuel_plugins_dir = fuel_plugins_dir
+ self.dea = (DeploymentEnvironmentAdapter(dea_file)
+ if not cleanup_only else None)
self.dha = DeploymentHardwareAdapter(dha_file)
self.fuel_conf = {}
self.fuel_node_id = self.dha.get_fuel_node_id()
- self.fuel_custom = self.dha.use_fuel_custom_install()
self.fuel_username, self.fuel_password = self.dha.get_fuel_access()
-
- def setup_dir(self, dir):
- self.cleanup_dir(dir)
- os.makedirs(dir)
-
- def cleanup_dir(self, dir):
- if os.path.isdir(dir):
- shutil.rmtree(dir)
-
- def power_off_blades(self):
- node_ids = self.dha.get_all_node_ids()
- node_ids = list(set(node_ids) - set([self.fuel_node_id]))
- for node_id in node_ids:
- self.dha.node_power_off(node_id)
+ self.tmp_dir = None
def modify_ip(self, ip_addr, index, val):
ip_str = str(netaddr.IPAddress(ip_addr))
@@ -77,11 +91,9 @@ class AutoDeploy(object):
self.fuel_conf['showmenu'] = 'yes'
def install_fuel_master(self):
- if self.without_fuel:
- log('Not Installing Fuel Master')
- return
log('Install Fuel Master')
- new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file))
+ new_iso = '%s/deploy-%s' \
+ % (self.tmp_dir, os.path.basename(self.iso_file))
self.patch_iso(new_iso)
self.iso_file = new_iso
self.install_iso()
@@ -90,40 +102,36 @@ class AutoDeploy(object):
fuel = InstallFuelMaster(self.dea_file, self.dha_file,
self.fuel_conf['ip'], self.fuel_username,
self.fuel_password, self.fuel_node_id,
- self.iso_file, WORK_DIR)
- if self.fuel_custom:
- log('Custom Fuel install')
- fuel.custom_install()
- else:
- log('Ordinary Fuel install')
- fuel.install()
+ self.iso_file, WORK_DIR,
+ self.fuel_plugins_dir)
+ fuel.install()
def patch_iso(self, new_iso):
- tmp_orig_dir = '%s/origiso' % TMP_DIR
- tmp_new_dir = '%s/newiso' % TMP_DIR
+ tmp_orig_dir = '%s/origiso' % self.tmp_dir
+ tmp_new_dir = '%s/newiso' % self.tmp_dir
self.copy(tmp_orig_dir, tmp_new_dir)
self.patch(tmp_new_dir, new_iso)
def copy(self, tmp_orig_dir, tmp_new_dir):
log('Copying...')
- self.setup_dir(tmp_orig_dir)
- self.setup_dir(tmp_new_dir)
+ os.makedirs(tmp_orig_dir)
+ os.makedirs(tmp_new_dir)
exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
with cd(tmp_orig_dir):
exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
with cd(tmp_new_dir):
exec_cmd('fusermount -u %s' % tmp_orig_dir)
- shutil.rmtree(tmp_orig_dir)
+ delete(tmp_orig_dir)
exec_cmd('chmod -R 755 %s' % tmp_new_dir)
def patch(self, tmp_new_dir, new_iso):
log('Patching...')
- patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR)
+ patch_dir = '%s/%s' % (CWD, PATCH_DIR)
ks_path = '%s/ks.cfg.patch' % patch_dir
with cd(tmp_new_dir):
exec_cmd('cat %s | patch -p0' % ks_path)
- shutil.rmtree('.rr_moved')
+ delete('.rr_moved')
isolinux = 'isolinux/isolinux.cfg'
log('isolinux.cfg before: %s'
% exec_cmd('grep netmask %s' % isolinux))
@@ -149,51 +157,152 @@ class AutoDeploy(object):
f.write(data)
def deploy_env(self):
- dep = CloudDeploy(self.dha, self.fuel_conf['ip'], self.fuel_username,
- self.fuel_password, self.dea_file, WORK_DIR)
- dep.deploy()
+ dep = CloudDeploy(self.dea, self.dha, self.fuel_conf['ip'],
+ self.fuel_username, self.fuel_password,
+ self.dea_file, WORK_DIR, self.no_health_check)
+ return dep.deploy()
+
+ def setup_execution_environment(self):
+ exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
+ self.dha_file, self.dea)
+ exec_env.setup_environment()
+
+ def cleanup_execution_environment(self):
+ exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge,
+ self.dha_file, self.dea)
+ exec_env.cleanup_environment()
+
+ def create_tmp_dir(self):
+ self.tmp_dir = '%s/fueltmp' % CWD
+ delete(self.tmp_dir)
+ create_dir_if_not_exists(self.tmp_dir)
def deploy(self):
- check_if_root()
- self.setup_dir(TMP_DIR)
self.collect_fuel_info()
- self.power_off_blades()
- self.install_fuel_master()
- self.cleanup_dir(TMP_DIR)
- self.deploy_env()
+ if not self.no_fuel:
+ self.setup_execution_environment()
+ self.create_tmp_dir()
+ self.install_fuel_master()
+ if not self.fuel_only:
+ return self.deploy_env()
+ return True
+
+ def run(self):
+ check_if_root()
+ if self.cleanup_only:
+ self.cleanup_execution_environment()
+ else:
+ deploy_success = self.deploy()
+ if self.cleanup:
+ self.cleanup_execution_environment()
+ return deploy_success
+ return True
-def usage():
- print '''
- Usage:
- python deploy.py [-nf] <isofile> <deafile> <dhafile>
+def check_bridge(pxe_bridge, dha_path):
+ with io.open(dha_path) as yaml_file:
+ dha_struct = yaml.load(yaml_file)
+ if dha_struct['adapter'] != 'libvirt':
+ log('Using Linux Bridge %s for booting up the Fuel Master VM'
+ % pxe_bridge)
+ r = exec_cmd('ip link show %s' % pxe_bridge)
+ if pxe_bridge in r and 'state DOWN' in r:
+ err('Linux Bridge {0} is not Active, bring'
+ ' it UP first: [ip link set dev {0} up]'.format(pxe_bridge))
+
+
+def check_fuel_plugins_dir(dir):
+ msg = None
+ if not dir:
+ msg = 'Fuel Plugins Directory not specified!'
+ elif not os.path.isdir(dir):
+ msg = 'Fuel Plugins Directory does not exist!'
+ elif not os.listdir(dir):
+ msg = 'Fuel Plugins Directory is empty!'
+ if msg:
+ warn('%s No external plugins will be installed!' % msg)
- Optional arguments:
- -nf Do not install Fuel master
- '''
def parse_arguments():
- if (len(sys.argv) < 4 or len(sys.argv) > 5
- or (len(sys.argv) == 5 and sys.argv[1] != '-nf')):
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- without_fuel = False
- if len(sys.argv) == 5 and sys.argv[1] == '-nf':
- without_fuel = True
- iso_file = sys.argv[-3]
- dea_file = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_file_exists(iso_file)
- check_file_exists(dea_file)
- check_file_exists(dha_file)
- return (without_fuel, iso_file, dea_file, dha_file)
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('-nf', dest='no_fuel', action='store_true',
+ default=False,
+ help='Do not install Fuel Master (and Node VMs when '
+ 'using libvirt)')
+ parser.add_argument('-nh', dest='no_health_check', action='store_true',
+ default=False,
+ help='Don\'t run health check after deployment')
+ parser.add_argument('-fo', dest='fuel_only', action='store_true',
+ default=False,
+ help='Install Fuel Master only (and Node VMs when '
+ 'using libvirt)')
+ parser.add_argument('-co', dest='cleanup_only', action='store_true',
+ default=False,
+ help='Cleanup VMs and Virtual Networks according to '
+ 'what is defined in DHA')
+ parser.add_argument('-c', dest='cleanup', action='store_true',
+ default=False,
+ help='Cleanup after deploy')
+ if {'-iso', '-dea', '-dha', '-h'}.intersection(sys.argv):
+ parser.add_argument('-iso', dest='iso_file', action='store', nargs='?',
+ default='%s/OPNFV.iso' % CWD,
+ help='ISO File [default: OPNFV.iso]')
+ parser.add_argument('-dea', dest='dea_file', action='store', nargs='?',
+ default='%s/dea.yaml' % CWD,
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('-dha', dest='dha_file', action='store', nargs='?',
+ default='%s/dha.yaml' % CWD,
+ help='Deployment Hardware Adapter: dha.yaml')
+ else:
+ parser.add_argument('iso_file', action='store', nargs='?',
+ default='%s/OPNFV.iso' % CWD,
+ help='ISO File [default: OPNFV.iso]')
+ parser.add_argument('dea_file', action='store', nargs='?',
+ default='%s/dea.yaml' % CWD,
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('dha_file', action='store', nargs='?',
+ default='%s/dha.yaml' % CWD,
+ help='Deployment Hardware Adapter: dha.yaml')
+ parser.add_argument('-s', dest='storage_dir', action='store',
+ default='%s/images' % CWD,
+ help='Storage Directory [default: images]')
+ parser.add_argument('-b', dest='pxe_bridge', action='store',
+ default='pxebr',
+ help='Linux Bridge for booting up the Fuel Master VM '
+ '[default: pxebr]')
+ parser.add_argument('-p', dest='fuel_plugins_dir', action='store',
+ help='Fuel Plugins directory')
-def main():
+ args = parser.parse_args()
+ log(args)
- without_fuel, iso_file, dea_file, dha_file = parse_arguments()
+ check_file_exists(args.dha_file)
+
+ if not args.cleanup_only:
+ check_file_exists(args.dea_file)
+ check_fuel_plugins_dir(args.fuel_plugins_dir)
+
+ if not args.no_fuel and not args.cleanup_only:
+ log('Using OPNFV ISO file: %s' % args.iso_file)
+ check_file_exists(args.iso_file)
+ log('Using image directory: %s' % args.storage_dir)
+ create_dir_if_not_exists(args.storage_dir)
+ check_bridge(args.pxe_bridge, args.dha_file)
+
+ kwargs = {'no_fuel': args.no_fuel, 'fuel_only': args.fuel_only,
+ 'no_health_check': args.no_health_check,
+ 'cleanup_only': args.cleanup_only, 'cleanup': args.cleanup,
+ 'storage_dir': args.storage_dir, 'pxe_bridge': args.pxe_bridge,
+ 'iso_file': args.iso_file, 'dea_file': args.dea_file,
+ 'dha_file': args.dha_file,
+ 'fuel_plugins_dir': args.fuel_plugins_dir}
+ return kwargs
+
+
+def main():
+ kwargs = parse_arguments()
- d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file)
- d.deploy()
+ d = AutoDeploy(**kwargs)
+ sys.exit(d.run())
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
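The argparse front end accepts the ISO/DEA/DHA arguments either flagged or positional (the flagged variants are only registered when one of -iso/-dea/-dha/-h appears on the command line). Typical invocations, paths illustrative:

    # Full deploy, positional form (requires root):
    python deploy.py ~/OPNFV.iso ~/dea.yaml ~/dha.yaml
    # Same, flagged, skipping the post-deploy health check:
    python deploy.py -nh -iso ~/OPNFV.iso -dea ~/dea.yaml -dha ~/dha.yaml
    # Tear down the VMs and virtual networks defined in the DHA:
    python deploy.py -co -dha ~/dha.yaml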
diff --git a/fuel/deploy/deploy_env.py b/fuel/deploy/deploy_env.py
index 9bc8fbb34..be8bed342 100644
--- a/fuel/deploy/deploy_env.py
+++ b/fuel/deploy/deploy_env.py
@@ -1,7 +1,18 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import os
import io
import yaml
import glob
+import time
from ssh_client import SSHClient
import common
@@ -10,38 +21,48 @@ exec_cmd = common.exec_cmd
err = common.err
check_file_exists = common.check_file_exists
log = common.log
+parse = common.parse
+commafy = common.commafy
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
CLOUD_DEPLOY_FILE = 'deploy.py'
+BLADE_RESTART_TIMES = 3
class CloudDeploy(object):
- def __init__(self, dha, fuel_ip, fuel_username, fuel_password, dea_file,
- work_dir):
+ def __init__(self, dea, dha, fuel_ip, fuel_username, fuel_password,
+ dea_file, work_dir, no_health_check):
+ self.dea = dea
self.dha = dha
self.fuel_ip = fuel_ip
self.fuel_username = fuel_username
self.fuel_password = fuel_password
self.dea_file = dea_file
self.work_dir = work_dir
+ self.no_health_check = no_health_check
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
self.fuel_password)
- self.macs_file = '%s/macs.yaml' % self.file_dir
+ self.blade_node_file = '%s/blade_node.yaml' % self.file_dir
self.node_ids = self.dha.get_node_ids()
+ self.wanted_release = self.dea.get_property('wanted_release')
+ self.blade_node_dict = {}
+ self.macs_per_blade = {}
def upload_cloud_deployment_files(self):
- dest ='~/%s/' % self.work_dir
-
with self.ssh as s:
- s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
- s.exec_cmd('mkdir ~/%s' % self.work_dir)
- s.scp_put(self.dea_file, dest)
- s.scp_put(self.macs_file, dest)
- s.scp_put('%s/common.py' % self.file_dir, dest)
- s.scp_put('%s/dea.py' % self.file_dir, dest)
+ s.exec_cmd('rm -rf %s' % self.work_dir, False)
+ s.exec_cmd('mkdir %s' % self.work_dir)
+ s.scp_put(self.dea_file, self.work_dir)
+ s.scp_put(self.blade_node_file, self.work_dir)
+ s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
+ s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
for f in glob.glob('%s/cloud/*' % self.file_dir):
- s.scp_put(f, dest)
+ s.scp_put(f, self.work_dir)
def power_off_nodes(self):
for node_id in self.node_ids:
@@ -53,35 +74,173 @@ class CloudDeploy(object):
def set_boot_order(self, boot_order_list):
for node_id in self.node_ids:
- self.dha.node_set_boot_order(node_id, boot_order_list)
+ self.dha.node_set_boot_order(node_id, boot_order_list[:])
def get_mac_addresses(self):
- macs_per_node = {}
+ self.macs_per_blade = {}
for node_id in self.node_ids:
- macs_per_node[node_id] = self.dha.get_node_pxe_mac(node_id)
- with io.open(self.macs_file, 'w') as stream:
- yaml.dump(macs_per_node, stream, default_flow_style=False)
+ self.macs_per_blade[node_id] = self.dha.get_node_pxe_mac(node_id)
def run_cloud_deploy(self, deploy_app):
log('START CLOUD DEPLOYMENT')
deploy_app = '%s/%s' % (self.work_dir, deploy_app)
dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
- macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file))
+ blade_node_file = '%s/%s' % (
+ self.work_dir, os.path.basename(self.blade_node_file))
+ with self.ssh as s:
+ status = s.run(
+ 'python %s %s %s %s' % (
+ deploy_app, ('-nh' if self.no_health_check else ''),
+ dea_file, blade_node_file))
+ return status
+
+ def check_supported_release(self):
+ log('Check supported release: %s' % self.wanted_release)
+ found = False
+ release_list = parse(self.ssh.exec_cmd('fuel release -l'))
+ for release in release_list:
+ if release[R['name']] == self.wanted_release:
+ found = True
+ break
+ if not found:
+ err('This Fuel does not contain the following release: %s'
+ % self.wanted_release)
+
+ def check_previous_installation(self):
+ log('Check previous installation')
+ env_list = parse(self.ssh.exec_cmd('fuel env list'))
+ if env_list:
+ self.cleanup_fuel_environments(env_list)
+ node_list = parse(self.ssh.exec_cmd('fuel node list'))
+ if node_list:
+ self.cleanup_fuel_nodes(node_list)
+
+ def cleanup_fuel_environments(self, env_list):
+ WAIT_LOOP = 60
+ SLEEP_TIME = 10
+ for env in env_list:
+ log('Deleting environment %s' % env[E['id']])
+ self.ssh.exec_cmd('fuel env --env %s --delete --force'
+ % env[E['id']])
+ all_env_erased = False
+ for i in range(WAIT_LOOP):
+ env_list = parse(self.ssh.exec_cmd('fuel env list'))
+ if env_list:
+ time.sleep(SLEEP_TIME)
+ else:
+ all_env_erased = True
+ break
+ if not all_env_erased:
+ err('Could not erase these environments %s'
+ % [(env[E['id']], env[E['status']]) for env in env_list])
+
+ def cleanup_fuel_nodes(self, node_list):
+ for node in node_list:
+ if node[N['status']] == 'discover':
+ log('Deleting node %s' % node[N['id']])
+ self.ssh.exec_cmd('fuel node --node-id %s --delete-from-db '
+ '--force' % node[N['id']])
+ self.ssh.exec_cmd('cobbler system remove --name node-%s'
+ % node[N['id']], False)
+
+ def check_prerequisites(self):
+ log('Check prerequisites')
with self.ssh:
- self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
+ self.check_supported_release()
+ self.check_previous_installation()
- def deploy(self):
+ def wait_for_discovered_blades(self):
+ log('Wait for discovered blades')
+ discovered_macs = []
+ restart_times = BLADE_RESTART_TIMES
- self.power_off_nodes()
+ for blade in self.node_ids:
+ self.blade_node_dict[blade] = None
+ with self.ssh:
+ all_discovered = self.discovery_waiting_loop(discovered_macs)
+
+ while not all_discovered and restart_times != 0:
+ restart_times -= 1
+ for blade in self.get_not_discovered_blades():
+ self.dha.node_reset(blade)
+ with self.ssh:
+ all_discovered = self.discovery_waiting_loop(discovered_macs)
+
+ if not all_discovered:
+ err('Not all blades have been discovered: %s'
+ % self.not_discovered_blades_summary())
+
+ with io.open(self.blade_node_file, 'w') as stream:
+ yaml.dump(self.blade_node_dict, stream, default_flow_style=False)
+
+ def discovery_waiting_loop(self, discovered_macs):
+ WAIT_LOOP = 360
+ SLEEP_TIME = 10
+ all_discovered = False
+ for i in range(WAIT_LOOP):
+ node_list = parse(self.ssh.exec_cmd('fuel node list'))
+ if node_list:
+ self.node_discovery(node_list, discovered_macs)
+ if self.all_blades_discovered():
+ all_discovered = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ return all_discovered
+
+ def node_discovery(self, node_list, discovered_macs):
+ for node in node_list:
+ if (node[N['status']] == 'discover' and
+ node[N['online']] == 'True' and
+ node[N['mac']] not in discovered_macs):
+ discovered_macs.append(node[N['mac']])
+ blade = self.find_mac_in_dict(node[N['mac']])
+ if blade:
+ log('Blade %s discovered as Node %s with MAC %s'
+ % (blade, node[N['id']], node[N['mac']]))
+ self.blade_node_dict[blade] = node[N['id']]
+
+ def find_mac_in_dict(self, mac):
+ for blade, mac_list in self.macs_per_blade.iteritems():
+ if mac in mac_list:
+ return blade
+
+ def all_blades_discovered(self):
+ for blade, node_id in self.blade_node_dict.iteritems():
+ if not node_id:
+ return False
+ return True
+
+ def not_discovered_blades_summary(self):
+ summary = ''
+ for blade, node_id in self.blade_node_dict.iteritems():
+ if not node_id:
+ summary += '\n[blade %s]' % blade
+ return summary
+
+ def get_not_discovered_blades(self):
+ not_discovered_blades = []
+ for blade, node_id in self.blade_node_dict.iteritems():
+ if not node_id:
+ not_discovered_blades.append(blade)
+ return not_discovered_blades
+
+ def set_boot_order_nodes(self):
+ self.power_off_nodes()
self.set_boot_order(['pxe', 'disk'])
-
self.power_on_nodes()
+ def deploy(self):
+
+ self.set_boot_order_nodes()
+
+ self.check_prerequisites()
+
self.get_mac_addresses()
- check_file_exists(self.macs_file)
+ self.wait_for_discovered_blades()
self.upload_cloud_deployment_files()
- self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
+ return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
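Discovery is now retried instead of failing outright: after the first waiting loop, only the still-missing blades are reset, up to BLADE_RESTART_TIMES attempts. A condensed sketch of wait_for_discovered_blades() without the SSH plumbing and the final YAML dump:

    discovered_macs = []
    retries = BLADE_RESTART_TIMES                 # 3
    done = self.discovery_waiting_loop(discovered_macs)
    while not done and retries:
        retries -= 1
        for blade in self.get_not_discovered_blades():
            self.dha.node_reset(blade)
        done = self.discovery_waiting_loop(discovered_macs)
    if not done:
        err('Not all blades have been discovered: %s'
            % self.not_discovered_blades_summary())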
diff --git a/fuel/deploy/dha.py b/fuel/deploy/dha.py
index bf9a9512a..1feee6039 100644
--- a/fuel/deploy/dha.py
+++ b/fuel/deploy/dha.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import yaml
import io
@@ -5,15 +15,20 @@ from dha_adapters.libvirt_adapter import LibvirtAdapter
from dha_adapters.ipmi_adapter import IpmiAdapter
from dha_adapters.hp_adapter import HpAdapter
+
class DeploymentHardwareAdapter(object):
+
def __new__(cls, yaml_path):
with io.open(yaml_path) as yaml_file:
dha_struct = yaml.load(yaml_file)
type = dha_struct['adapter']
if cls is DeploymentHardwareAdapter:
- if type == 'libvirt': return LibvirtAdapter(yaml_path)
- if type == 'ipmi': return IpmiAdapter(yaml_path)
- if type == 'hp': return HpAdapter(yaml_path)
+ if type == 'libvirt':
+ return LibvirtAdapter(yaml_path)
+ if type == 'ipmi':
+ return IpmiAdapter(yaml_path)
+ if type == 'hp':
+ return HpAdapter(yaml_path)
return super(DeploymentHardwareAdapter, cls).__new__(cls)
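DeploymentHardwareAdapter is a factory: __new__ reads the adapter key from dha.yaml and returns the matching subclass, so callers never name a concrete adapter. For example:

    from dha import DeploymentHardwareAdapter

    # dha.yaml says 'adapter: ipmi', so this yields an IpmiAdapter:
    dha = DeploymentHardwareAdapter('dha.yaml')
    dha.node_power_on(1)   # dispatches to the IPMI implementation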
diff --git a/fuel/deploy/dha_adapters/__init__.py b/fuel/deploy/dha_adapters/__init__.py
index e69de29bb..fb73157f9 100644
--- a/fuel/deploy/dha_adapters/__init__.py
+++ b/fuel/deploy/dha_adapters/__init__.py
@@ -0,0 +1,8 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
diff --git a/fuel/deploy/dha_adapters/hardware_adapter.py b/fuel/deploy/dha_adapters/hardware_adapter.py
index 884e9ce98..29e04f182 100644
--- a/fuel/deploy/dha_adapters/hardware_adapter.py
+++ b/fuel/deploy/dha_adapters/hardware_adapter.py
@@ -1,7 +1,18 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
import yaml
import io
+
class HardwareAdapter(object):
+
def __init__(self, yaml_path):
self.dha_struct = None
self.parse_yaml(yaml_path)
@@ -34,18 +45,15 @@ class HardwareAdapter(object):
node_ids.sort()
return node_ids
- def use_fuel_custom_install(self):
- return self.dha_struct['fuelCustomInstall']
-
def get_node_property(self, node_id, property_name):
for node in self.dha_struct['nodes']:
if node['id'] == node_id and property_name in node:
return node[property_name]
- def node_can_zero_mbr(self, node_id):
- return self.get_node_property(node_id, 'nodeCanZeroMBR')
-
def get_fuel_access(self):
for node in self.dha_struct['nodes']:
if 'isFuel' in node and node['isFuel']:
return node['username'], node['password']
+
+ def get_disks(self):
+ return self.dha_struct['disks']
diff --git a/fuel/deploy/dha_adapters/hp_adapter.py b/fuel/deploy/dha_adapters/hp_adapter.py
index 8fc38ad5f..51f55f32b 100644
--- a/fuel/deploy/dha_adapters/hp_adapter.py
+++ b/fuel/deploy/dha_adapters/hp_adapter.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
from ipmi_adapter import IpmiAdapter
from ssh_client import SSHClient
@@ -10,6 +20,7 @@ DEV = {'pxe': 'bootsource5',
ROOT = '/system1/bootconfig1'
+
class HpAdapter(IpmiAdapter):
def __init__(self, yaml_path):
@@ -19,7 +30,7 @@ class HpAdapter(IpmiAdapter):
log('Set boot order %s on Node %s' % (boot_order_list, node_id))
ip, username, password = self.get_access_info(node_id)
ssh = SSHClient(ip, username, password)
- for order, dev in enumerate(boot_order_list):
- with ssh as s:
+ with ssh as s:
+ for order, dev in enumerate(boot_order_list):
s.exec_cmd('set %s/%s bootorder=%s'
- % (ROOT, DEV[dev], order+1))
+ % (ROOT, DEV[dev], order + 1))
diff --git a/fuel/deploy/dha_adapters/ipmi_adapter.py b/fuel/deploy/dha_adapters/ipmi_adapter.py
index d97fd2ddb..25aa36ec9 100644
--- a/fuel/deploy/dha_adapters/ipmi_adapter.py
+++ b/fuel/deploy/dha_adapters/ipmi_adapter.py
@@ -1,8 +1,21 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
+import time
from hardware_adapter import HardwareAdapter
log = common.log
exec_cmd = common.exec_cmd
+err = common.err
+
class IpmiAdapter(HardwareAdapter):
@@ -27,28 +40,72 @@ class IpmiAdapter(HardwareAdapter):
return mac_list
def node_power_on(self, node_id):
+ WAIT_LOOP = 200
+ SLEEP_TIME = 3
log('Power ON Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is off':
exec_cmd('%s chassis power on' % cmd_prefix)
+ done = False
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is on':
+ done = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ if not done:
+ err('Could Not Power ON Node %s' % node_id)
def node_power_off(self, node_id):
+ WAIT_LOOP = 200
+ SLEEP_TIME = 3
log('Power OFF Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is on':
+ done = False
exec_cmd('%s chassis power off' % cmd_prefix)
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is off':
+ done = True
+ break
+ else:
+ time.sleep(SLEEP_TIME)
+ if not done:
+ err('Could Not Power OFF Node %s' % node_id)
def node_reset(self, node_id):
- log('Reset Node %s' % node_id)
+ WAIT_LOOP = 600
+ log('RESET Node %s' % node_id)
cmd_prefix = self.ipmi_cmd(node_id)
state = exec_cmd('%s chassis power status' % cmd_prefix)
if state == 'Chassis Power is on':
+ was_shut_off = False
+ done = False
exec_cmd('%s chassis power reset' % cmd_prefix)
+ for i in range(WAIT_LOOP):
+ state, _ = exec_cmd('%s chassis power status' % cmd_prefix,
+ False)
+ if state == 'Chassis Power is off':
+ was_shut_off = True
+ elif state == 'Chassis Power is on' and was_shut_off:
+ done = True
+ break
+ time.sleep(1)
+ if not done:
+ err('Could Not RESET Node %s' % node_id)
+ else:
+ err('Cannot RESET Node %s because it\'s not Active, state: %s'
+ % (node_id, state))
def node_set_boot_order(self, node_id, boot_order_list):
log('Set boot order %s on Node %s' % (boot_order_list, node_id))
+ boot_order_list.reverse()
cmd_prefix = self.ipmi_cmd(node_id)
for dev in boot_order_list:
if dev == 'pxe':
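All three power operations share the same ipmitool poll pattern. Isolated as a sketch (wait_for_power_state is an illustrative name; exec_cmd returns (output, return_code) when check is False, as in common.py):

    import time
    import common

    def wait_for_power_state(cmd_prefix, wanted,
                             wait_loop=200, sleep_time=3):
        # Poll '<ipmitool prefix> chassis power status' until it reports
        # the wanted state, e.g. 'Chassis Power is on'.
        for _ in range(wait_loop):
            state, _ = common.exec_cmd('%s chassis power status'
                                       % cmd_prefix, False)
            if state == wanted:
                return True
            time.sleep(sleep_time)
        return False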
diff --git a/fuel/deploy/dha_adapters/libvirt_adapter.py b/fuel/deploy/dha_adapters/libvirt_adapter.py
index dde494635..b285c1676 100644
--- a/fuel/deploy/dha_adapters/libvirt_adapter.py
+++ b/fuel/deploy/dha_adapters/libvirt_adapter.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
from lxml import etree
from hardware_adapter import HardwareAdapter
@@ -10,6 +20,7 @@ DEV = {'pxe': 'network',
'disk': 'hd',
'iso': 'cdrom'}
+
class LibvirtAdapter(HardwareAdapter):
def __init__(self, yaml_path):
@@ -88,7 +99,8 @@ class LibvirtAdapter(HardwareAdapter):
def node_eject_iso(self, node_id):
vm_name = self.get_node_property(node_id, 'libvirtName')
device = self.get_name_of_device(vm_name, 'cdrom')
- exec_cmd('virsh change-media %s --eject %s' % (vm_name, device), False)
+ exec_cmd('virsh change-media %s --eject %s --config --live'
+ % (vm_name, device), False)
def node_insert_iso(self, node_id, iso_file):
vm_name = self.get_node_property(node_id, 'libvirtName')
@@ -96,12 +108,6 @@ class LibvirtAdapter(HardwareAdapter):
exec_cmd('virsh change-media %s --insert %s %s'
% (vm_name, device, iso_file))
- def get_disks(self):
- return self.dha_struct['disks']
-
- def get_node_role(self, node_id):
- return self.get_node_property(node_id, 'role')
-
def get_node_pxe_mac(self, node_id):
mac_list = []
vm_name = self.get_node_property(node_id, 'libvirtName')
@@ -125,3 +131,6 @@ class LibvirtAdapter(HardwareAdapter):
device = target.get('dev')
if device:
return device
+
+ def get_virt_net_conf_dir(self):
+ return self.dha_struct['virtNetConfDir']
diff --git a/fuel/build/f_ntp/testing/fake_init.pp b/fuel/deploy/environments/__init__.py
index b9af21836..fb73157f9 100644
--- a/fuel/build/f_ntp/testing/fake_init.pp
+++ b/fuel/deploy/environments/__init__.py
@@ -1,13 +1,8 @@
-##############################################################################
+###############################################################################
# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
+# szilard.cserey@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-include opnfv::ntp
+###############################################################################
diff --git a/fuel/deploy/environments/execution_environment.py b/fuel/deploy/environments/execution_environment.py
new file mode 100644
index 000000000..63be5cd11
--- /dev/null
+++ b/fuel/deploy/environments/execution_environment.py
@@ -0,0 +1,78 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+from lxml import etree
+
+import common
+from dha_adapters.libvirt_adapter import LibvirtAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+
+class ExecutionEnvironment(object):
+
+ def __init__(self, storage_dir, dha_file, root_dir):
+ self.storage_dir = storage_dir
+ self.dha = LibvirtAdapter(dha_file)
+ self.root_dir = root_dir
+ self.parser = etree.XMLParser(remove_blank_text=True)
+ self.fuel_node_id = self.dha.get_fuel_node_id()
+
+ def delete_vm(self, node_id):
+ vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+ r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+ if c:
+ return
+ self.undefine_vm_delete_disk(r, vm_name)
+
+ def undefine_vm_delete_disk(self, printout, vm_name):
+ disk_files = []
+ xml_dump = etree.fromstring(printout, self.parser)
+ disks = xml_dump.xpath('/domain/devices/disk')
+ for disk in disks:
+ sources = disk.xpath('source')
+ for source in sources:
+ source_file = source.get('file')
+ if source_file:
+ disk_files.append(source_file)
+ log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+ exec_cmd('virsh destroy %s' % vm_name, False)
+ exec_cmd('virsh undefine %s' % vm_name, False)
+ for file in disk_files:
+ exec_cmd('rm -f %s' % file)
+
+ def define_vm(self, vm_name, temp_vm_file, disk_path):
+ log('Creating VM %s with disks %s' % (vm_name, disk_path))
+ with open(temp_vm_file) as f:
+ vm_xml = etree.parse(f)
+ names = vm_xml.xpath('/domain/name')
+ for name in names:
+ name.text = vm_name
+ uuids = vm_xml.xpath('/domain/uuid')
+ for uuid in uuids:
+ uuid.getparent().remove(uuid)
+ disks = vm_xml.xpath('/domain/devices/disk')
+ for disk in disks:
+ if (disk.get('type') == 'file' and
+ disk.get('device') == 'disk'):
+ sources = disk.xpath('source')
+ for source in sources:
+ disk.remove(source)
+ source = etree.Element('source')
+ source.set('file', disk_path)
+ disk.append(source)
+ with open(temp_vm_file, 'w') as f:
+ vm_xml.write(f, pretty_print=True, xml_declaration=True)
+ exec_cmd('virsh define %s' % temp_vm_file)
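
define_vm's template rewrite is easiest to read in isolation: rename the
domain, drop the uuid so libvirt generates a fresh one, and point every
file-backed disk at the newly allocated image. A minimal sketch of the same
lxml transformation (the function name and sample XML are illustrative):

    from lxml import etree

    def rewrite_template(xml_text, vm_name, disk_path):
        root = etree.fromstring(xml_text)
        root.find('name').text = vm_name
        for uuid in root.findall('uuid'):
            root.remove(uuid)  # libvirt assigns a new UUID on define
        for disk in root.findall('devices/disk'):
            if disk.get('type') == 'file' and disk.get('device') == 'disk':
                for source in disk.findall('source'):
                    disk.remove(source)
                etree.SubElement(disk, 'source').set('file', disk_path)
        return etree.tostring(root, pretty_print=True)

    print rewrite_template(
        '<domain><name>x</name><uuid>1</uuid><devices>'
        '<disk type="file" device="disk"><source file="old.raw"/></disk>'
        '</devices></domain>',
        'controller1', '/mnt/images/controller1.raw')
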
diff --git a/fuel/deploy/environments/libvirt_environment.py b/fuel/deploy/environments/libvirt_environment.py
new file mode 100644
index 000000000..785eeca7b
--- /dev/null
+++ b/fuel/deploy/environments/libvirt_environment.py
@@ -0,0 +1,107 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+from lxml import etree
+import glob
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+
+class LibvirtEnvironment(ExecutionEnvironment):
+
+ def __init__(self, storage_dir, dha_file, dea, root_dir):
+ super(LibvirtEnvironment, self).__init__(
+ storage_dir, dha_file, root_dir)
+ self.dea = dea
+ self.network_dir = '%s/%s' % (self.root_dir,
+ self.dha.get_virt_net_conf_dir())
+ self.node_ids = self.dha.get_all_node_ids()
+ self.net_names = self.collect_net_names()
+
+ def create_storage(self, node_id, disk_path, disk_sizes):
+ if node_id == self.fuel_node_id:
+ disk_size = disk_sizes['fuel']
+ else:
+ roles = self.dea.get_node_role(node_id)
+ role = 'controller' if 'controller' in roles else 'compute'
+ disk_size = disk_sizes[role]
+ exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+
+ def create_vms(self):
+ temp_dir = exec_cmd('mktemp -d')
+ disk_sizes = self.dha.get_disks()
+ for node_id in self.node_ids:
+ vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+ vm_template = '%s/%s' % (self.root_dir,
+ self.dha.get_node_property(
+ node_id, 'libvirtTemplate'))
+ check_file_exists(vm_template)
+ disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+ self.create_storage(node_id, disk_path, disk_sizes)
+ temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+ exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+ self.define_vm(vm_name, temp_vm_file, disk_path)
+ exec_cmd('rm -fr %s' % temp_dir)
+
+ def start_vms(self):
+ for node_id in self.node_ids:
+ self.dha.node_power_on(node_id)
+
+ def create_networks(self):
+ for net_file in glob.glob('%s/*' % self.network_dir):
+ exec_cmd('virsh net-define %s' % net_file)
+ for net in self.net_names:
+ log('Creating network %s' % net)
+ exec_cmd('virsh net-autostart %s' % net)
+ exec_cmd('virsh net-start %s' % net)
+
+ def delete_networks(self):
+ for net in self.net_names:
+ log('Deleting network %s' % net)
+ exec_cmd('virsh net-destroy %s' % net, False)
+ exec_cmd('virsh net-undefine %s' % net, False)
+
+ def get_net_name(self, net_file):
+ with open(net_file) as f:
+ net_xml = etree.parse(f)
+ name_list = net_xml.xpath('/network/name')
+ for name in name_list:
+ net_name = name.text
+ return net_name
+
+ def collect_net_names(self):
+ net_list = []
+ for net_file in glob.glob('%s/*' % self.network_dir):
+ name = self.get_net_name(net_file)
+ net_list.append(name)
+ return net_list
+
+ def delete_vms(self):
+ for node_id in self.node_ids:
+ self.delete_vm(node_id)
+
+ def setup_environment(self):
+ check_dir_exists(self.network_dir)
+ self.cleanup_environment()
+ self.create_networks()
+ self.create_vms()
+ self.start_vms()
+
+ def cleanup_environment(self):
+ self.delete_vms()
+ self.delete_networks()
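
create_storage leans on fallocate for the node disks: the extents are reserved
up front without being zero-filled, so creating several 30G images completes
almost instantly while still guaranteeing the space exists. A hedged
one-function sketch, with subprocess standing in for common.exec_cmd:

    import subprocess

    def allocate_disk(disk_path, size):
        # Reserve the full logical size without writing zeros -- near-instant
        # compared to dd if=/dev/zero, and the space is guaranteed to exist.
        subprocess.check_call(['fallocate', '-l', size, disk_path])

    allocate_disk('/mnt/images/compute1.raw', '30G')
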
diff --git a/fuel/deploy/environments/virtual_fuel.py b/fuel/deploy/environments/virtual_fuel.py
new file mode 100644
index 000000000..cb8be6371
--- /dev/null
+++ b/fuel/deploy/environments/virtual_fuel.py
@@ -0,0 +1,70 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+from lxml import etree
+
+import common
+from execution_environment import ExecutionEnvironment
+
+exec_cmd = common.exec_cmd
+log = common.log
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+
+class VirtualFuel(ExecutionEnvironment):
+
+ def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir):
+ super(VirtualFuel, self).__init__(storage_dir, dha_file, root_dir)
+ self.pxe_bridge = pxe_bridge
+
+ def set_vm_nic(self, temp_vm_file):
+ with open(temp_vm_file) as f:
+ vm_xml = etree.parse(f)
+ interfaces = vm_xml.xpath('/domain/devices/interface')
+ for interface in interfaces:
+ interface.getparent().remove(interface)
+ interface = etree.Element('interface')
+ interface.set('type', 'bridge')
+ source = etree.SubElement(interface, 'source')
+ source.set('bridge', self.pxe_bridge)
+ model = etree.SubElement(interface, 'model')
+ model.set('type', 'virtio')
+ devices = vm_xml.xpath('/domain/devices')
+ if devices:
+ device = devices[0]
+ device.append(interface)
+ with open(temp_vm_file, 'w') as f:
+ vm_xml.write(f, pretty_print=True, xml_declaration=True)
+
+ def create_vm(self):
+ temp_dir = exec_cmd('mktemp -d')
+ vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
+ vm_template = '%s/%s' % (self.root_dir,
+ self.dha.get_node_property(
+ self.fuel_node_id, 'libvirtTemplate'))
+ check_file_exists(vm_template)
+ disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+ disk_sizes = self.dha.get_disks()
+ disk_size = disk_sizes['fuel']
+ exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+ temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+ exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
+ self.set_vm_nic(temp_vm_file)
+ self.define_vm(vm_name, temp_vm_file, disk_path)
+ exec_cmd('rm -fr %s' % temp_dir)
+
+ def setup_environment(self):
+ check_if_root()
+ self.cleanup_environment()
+ self.create_vm()
+
+ def cleanup_environment(self):
+ self.delete_vm(self.fuel_node_id)
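
set_vm_nic is what makes the Fuel VM bootable on any host: every <interface>
the template shipped with is dropped and replaced by a single virtio NIC on
the PXE bridge handed to the deployer. The same rewrite, sketched standalone
(set_pxe_nic is an illustrative name):

    from lxml import etree

    def set_pxe_nic(domain_xml, pxe_bridge):
        root = etree.fromstring(domain_xml)
        devices = root.find('devices')
        # Drop whatever NICs the template defined...
        for iface in devices.findall('interface'):
            devices.remove(iface)
        # ...and attach one bridge-backed virtio interface in their place.
        iface = etree.SubElement(devices, 'interface', type='bridge')
        etree.SubElement(iface, 'source', bridge=pxe_bridge)
        etree.SubElement(iface, 'model', type='virtio')
        return etree.tostring(root, pretty_print=True)
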
diff --git a/fuel/deploy/execution_environment.py b/fuel/deploy/execution_environment.py
new file mode 100644
index 000000000..e671463e4
--- /dev/null
+++ b/fuel/deploy/execution_environment.py
@@ -0,0 +1,46 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+import yaml
+import io
+import os
+
+import common
+from environments.libvirt_environment import LibvirtEnvironment
+from environments.virtual_fuel import VirtualFuel
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+ArgParser = common.ArgParser
+
+
+class ExecutionEnvironment(object):
+
+ def __new__(cls, storage_dir, pxe_bridge, dha_path, dea):
+
+ with io.open(dha_path) as yaml_file:
+ dha_struct = yaml.load(yaml_file)
+
+ type = dha_struct['adapter']
+
+ root_dir = os.path.dirname(os.path.realpath(__file__))
+
+ if cls is ExecutionEnvironment:
+ if type == 'libvirt':
+ return LibvirtEnvironment(storage_dir, dha_path, dea, root_dir)
+
+ if type == 'ipmi' or type == 'hp':
+ return VirtualFuel(storage_dir, pxe_bridge, dha_path, root_dir)
+
+ return super(ExecutionEnvironment, cls).__new__(cls)
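
The class above is a factory: because __new__ is overridden, callers simply
construct ExecutionEnvironment(...) and transparently receive the concrete
subclass named by the adapter field in the DHA file. The pattern in miniature
(Transport, Tcp and Udp are made-up names):

    class Transport(object):
        def __new__(cls, kind):
            if cls is Transport:
                # Dispatch on configuration, like the adapter field above.
                if kind == 'tcp':
                    return Tcp(kind)
                if kind == 'udp':
                    return Udp(kind)
            return super(Transport, cls).__new__(cls)

    class Tcp(Transport):
        pass

    class Udp(Transport):
        pass

    assert isinstance(Transport('tcp'), Tcp)
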
diff --git a/fuel/deploy/fuel_patch/ks.cfg.patch b/fuel/deploy/fuel_patch/ks.cfg.patch
new file mode 100644
index 000000000..189695792
--- /dev/null
+++ b/fuel/deploy/fuel_patch/ks.cfg.patch
@@ -0,0 +1,19 @@
+*** ks.cfg.orig Wed Apr 15 21:47:09 2015
+--- ks.cfg Wed Apr 15 21:47:24 2015
+***************
+*** 35,41 ****
+ default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
+
+ installdrive="undefined"
+! forceformat="no"
+ for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
+
+ set ${drives} ${removable_drives}
+--- 35,41 ----
+ default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
+
+ installdrive="undefined"
+! forceformat="yes"
+ for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
+
+ set ${drives} ${removable_drives}
diff --git a/fuel/deploy/install-ubuntu-packages.sh b/fuel/deploy/install-ubuntu-packages.sh
deleted file mode 100755
index 1ebd7c023..000000000
--- a/fuel/deploy/install-ubuntu-packages.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Tools for installation on the libvirt server/base host
-#
-apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
- sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
- python-paramiko python-lxml python-pip
-pip install scp
-restart libvirt-bin
\ No newline at end of file
diff --git a/fuel/deploy/install_fuel_master.py b/fuel/deploy/install_fuel_master.py
index bb8e7e175..0e3c1c044 100644
--- a/fuel/deploy/install_fuel_master.py
+++ b/fuel/deploy/install_fuel_master.py
@@ -1,20 +1,37 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import common
import time
import os
+import glob
from ssh_client import SSHClient
from dha_adapters.libvirt_adapter import LibvirtAdapter
log = common.log
err = common.err
clean = common.clean
+delete = common.delete
TRANSPLANT_FUEL_SETTINGS = 'transplant_fuel_settings.py'
BOOTSTRAP_ADMIN = '/usr/local/sbin/bootstrap_admin_node'
+FUEL_CLIENT_CONFIG = '/etc/fuel/client/config.yaml'
+PLUGINS_DIR = '~/plugins'
+LOCAL_PLUGIN_FOLDER = '/opt/opnfv'
+
class InstallFuelMaster(object):
- def __init__(self, dea_file, dha_file, fuel_ip, fuel_username, fuel_password,
- fuel_node_id, iso_file, work_dir):
+ def __init__(self, dea_file, dha_file, fuel_ip, fuel_username,
+ fuel_password, fuel_node_id, iso_file, work_dir,
+ fuel_plugins_dir):
self.dea_file = dea_file
self.dha = LibvirtAdapter(dha_file)
self.fuel_ip = fuel_ip
@@ -22,7 +39,9 @@ class InstallFuelMaster(object):
self.fuel_password = fuel_password
self.fuel_node_id = fuel_node_id
self.iso_file = iso_file
+ self.iso_dir = os.path.dirname(self.iso_file)
self.work_dir = work_dir
+ self.fuel_plugins_dir = fuel_plugins_dir
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
self.fuel_password)
@@ -32,21 +51,16 @@ class InstallFuelMaster(object):
self.dha.node_power_off(self.fuel_node_id)
- self.zero_mbr_set_boot_order()
-
- self.proceed_with_installation()
-
- def custom_install(self):
- log('Start Custom Fuel Installation')
-
- self.dha.node_power_off(self.fuel_node_id)
-
log('Zero the MBR')
self.dha.node_zero_mbr(self.fuel_node_id)
self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
- self.proceed_with_installation()
+ try:
+ self.proceed_with_installation()
+ except Exception as e:
+ self.post_install_cleanup()
+ err(e)
def proceed_with_installation(self):
log('Eject ISO')
@@ -68,7 +82,7 @@ class InstallFuelMaster(object):
log('Let the Fuel deployment continue')
log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid)
- self.ssh_exec_cmd('kill %s' % fuel_menu_pid)
+ self.ssh_exec_cmd('kill %s' % fuel_menu_pid, False)
log('Wait until installation complete')
self.wait_until_installation_completed()
@@ -76,22 +90,36 @@ class InstallFuelMaster(object):
log('Waiting for one minute for Fuel to stabilize')
time.sleep(60)
- log('Eject ISO')
- self.dha.node_eject_iso(self.fuel_node_id)
+ self.delete_deprecated_fuel_client_config_from_fuel_6_1()
+
+ self.collect_plugin_files()
+
+ self.install_plugins()
+
+ self.post_install_cleanup()
log('Fuel Master installed successfully !')
- def zero_mbr_set_boot_order(self):
- if self.dha.node_can_zero_mbr(self.fuel_node_id):
- log('Fuel Node %s capable of zeroing MBR so doing that...'
- % self.fuel_node_id)
- self.dha.node_zero_mbr(self.fuel_node_id)
- self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
- elif self.dha.node_can_set_boot_order_live(self.fuel_node_id):
- log('Node %s can change ISO boot order live' % self.fuel_node_id)
- self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk'])
- else:
- err('No way to install Fuel node')
+ def collect_plugin_files(self):
+ with self.ssh as s:
+ s.exec_cmd('mkdir %s' % PLUGINS_DIR)
+ if self.fuel_plugins_dir:
+ for f in glob.glob('%s/*.rpm' % self.fuel_plugins_dir):
+ s.scp_put(f, PLUGINS_DIR)
+ else:
+ s.exec_cmd('cp %s/*.rpm %s' % (LOCAL_PLUGIN_FOLDER,
+ PLUGINS_DIR))
+
+ def install_plugins(self):
+ log('Installing Fuel Plugins')
+ with self.ssh as s:
+ r = s.exec_cmd('find %s -type f -name \'*.rpm\'' % PLUGINS_DIR)
+ for f in r.splitlines():
+ log('Found plugin %s, installing ...' % f)
+ r, e = s.exec_cmd('fuel plugins --install %s' % f, False)
+ if e and 'does not update installed package' not in r:
+ raise Exception('Installation of Fuel Plugin %s '
+ 'failed: %s' % (f, e))
def wait_for_node_up(self):
WAIT_LOOP = 60
@@ -103,14 +131,14 @@ class InstallFuelMaster(object):
success = True
break
except Exception as e:
- log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... '
- 'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME))
+ log('Trying to SSH into Fuel VM %s ... sleeping %s seconds'
+ % (self.fuel_ip, SLEEP_TIME))
time.sleep(SLEEP_TIME)
finally:
self.ssh.close()
if not success:
- err('Could not SSH into Fuel VM %s' % self.fuel_ip)
+ raise Exception('Could not SSH into Fuel VM %s' % self.fuel_ip)
def wait_until_fuel_menu_up(self):
WAIT_LOOP = 60
@@ -127,39 +155,35 @@ class InstallFuelMaster(object):
else:
break
if not fuel_menu_pid:
- err('Could not find the Fuel Menu Process ID')
+ raise Exception('Could not find the Fuel Menu Process ID')
return fuel_menu_pid
def get_fuel_menu_pid(self, printout, search):
- fuel_menu_pid = None
for line in printout.splitlines():
- if search in line:
- fuel_menu_pid = clean(line)[1]
- break
- return fuel_menu_pid
+ if line.endswith(search):
+ return clean(line)[1]
- def ssh_exec_cmd(self, cmd):
+ def ssh_exec_cmd(self, cmd, check=True):
with self.ssh:
- ret = self.ssh.exec_cmd(cmd)
+ ret = self.ssh.exec_cmd(cmd, check=check)
return ret
def inject_own_astute_yaml(self):
- dest ='~/%s/' % self.work_dir
-
with self.ssh as s:
- s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
- s.exec_cmd('mkdir ~/%s' % self.work_dir)
- s.scp_put(self.dea_file, dest)
- s.scp_put('%s/common.py' % self.file_dir, dest)
- s.scp_put('%s/dea.py' % self.file_dir, dest)
- s.scp_put('%s/transplant_fuel_settings.py' % self.file_dir, dest)
+ s.exec_cmd('rm -rf %s' % self.work_dir, False)
+ s.exec_cmd('mkdir %s' % self.work_dir)
+ s.scp_put(self.dea_file, self.work_dir)
+ s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
+ s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
+ s.scp_put('%s/transplant_fuel_settings.py'
+ % self.file_dir, self.work_dir)
log('Modifying Fuel astute')
- s.run('python ~/%s/%s ~/%s/%s'
+ s.run('python %s/%s %s/%s'
% (self.work_dir, TRANSPLANT_FUEL_SETTINGS,
self.work_dir, os.path.basename(self.dea_file)))
def wait_until_installation_completed(self):
- WAIT_LOOP = 180
+ WAIT_LOOP = 360
SLEEP_TIME = 10
CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN
@@ -174,4 +198,21 @@ class InstallFuelMaster(object):
time.sleep(SLEEP_TIME)
if not install_completed:
- err('Fuel installation did not complete')
+ raise Exception('Fuel installation did not complete')
+
+ def post_install_cleanup(self):
+ log('Eject ISO file %s' % self.iso_file)
+ self.dha.node_eject_iso(self.fuel_node_id)
+ log('Remove ISO directory %s' % self.iso_dir)
+ delete(self.iso_dir)
+
+ def delete_deprecated_fuel_client_config_from_fuel_6_1(self):
+ with self.ssh as s:
+ response, error = s.exec_cmd('fuel -v', False)
+ if (error and
+ 'DEPRECATION WARNING' in error and
+ '6.1.0' in error and
+ FUEL_CLIENT_CONFIG in error):
+ log('Delete deprecated fuel client config %s' % FUEL_CLIENT_CONFIG)
+ with self.ssh as s:
+ s.exec_cmd('rm %s' % FUEL_CLIENT_CONFIG, False)
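
Three of the methods above (wait_for_node_up, wait_until_fuel_menu_up and
wait_until_installation_completed) share one polling discipline, and the
switch from err() to raised exceptions is what lets install() reach
post_install_cleanup on failure. The shared pattern, sketched generically
(wait_until is an illustrative helper, not part of the deployer):

    import time

    def wait_until(probe, attempts=60, sleep_time=10, what='condition'):
        # Poll with a fixed retry budget; probe errors count as "not yet".
        for _ in range(attempts):
            try:
                if probe():
                    return
            except Exception:
                pass
            time.sleep(sleep_time)
        # Raise instead of exiting so the caller can still clean up.
        raise Exception('Timed out waiting for %s' % what)
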
diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml
deleted file mode 100644
index ce61e534a..000000000
--- a/fuel/deploy/libvirt/dha.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-title: Deployment Hardware Adapter (DHA)
-# DHA API version supported
-version: 1.1
-created: Sat Apr 25 16:26:22 UTC 2015
-comment: Small libvirt setup
-
-# Adapter to use for this definition
-adapter: libvirt
-
-# Node list.
-# Mandatory fields are id and role.
-# The MAC address of the PXE boot interface is not mandatory
-# to be set, but the field must be present.
-# All other fields are adapter specific.
-
-nodes:
-- id: 1
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller1
- libvirtTemplate: controller
- role: controller
-- id: 2
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller2
- libvirtTemplate: controller
- role: controller
-- id: 3
- pxeMac: 52:54:00:aa:dd:84
- libvirtName: controller3
- libvirtTemplate: controller
- role: controller
-- id: 4
- pxeMac: 52:54:00:41:64:f3
- libvirtName: compute1
- libvirtTemplate: compute
- role: compute
-- id: 5
- pxeMac: 52:54:00:69:a0:79
- libvirtName: compute2
- libvirtTemplate: compute
- role: compute
-- id: 6
- pxeMac: 52:54:00:69:a0:79
- libvirtName: compute3
- libvirtTemplate: compute
- role: compute
-- id: 7
- pxeMac: 52:54:00:f8:b0:75
- libvirtName: fuel-master
- libvirtTemplate: fuel-master
- isFuel: yes
- nodeCanZeroMBR: yes
- nodeCanSetBootOrderLive: yes
- username: root
- password: r00tme
-
-disks:
- fuel: 30G
- controller: 30G
- compute: 30G
-
-# Deployment power on strategy
-# all: Turn on all nodes at once. There will be no correlation
-# between the DHA and DEA node numbering. MAC addresses
-# will be used to select the node roles though.
-# sequence: Turn on the nodes in sequence starting with the lowest order
-# node and wait for the node to be detected by Fuel. Not until
-# the node has been detected and assigned a role will the next
-# node be turned on.
-powerOnStrategy: all
-
-# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
-# calling the DHA adapter function "dha_fuelCustomInstall()" with two
-# arguments: node ID and the ISO file name to deploy. The custom install
-# function is then to handle all necessary logic to boot the Fuel master
-# from the ISO and then return.
-# Allowed values: true, false
-
-fuelCustomInstall: false
-
diff --git a/fuel/deploy/reap.py b/fuel/deploy/reap.py
new file mode 100644
index 000000000..c72b33cf9
--- /dev/null
+++ b/fuel/deploy/reap.py
@@ -0,0 +1,339 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
+import common
+import time
+import os
+import yaml
+import glob
+import shutil
+
+N = common.N
+E = common.E
+R = common.R
+ArgParser = common.ArgParser
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+log = common.log
+delete = common.delete
+commafy = common.commafy
+
+DEA_1 = '''
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+'''
+
+DHA_1 = '''
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: {date}
+comment: {comment}
+
+# Adapter to use for this definition
+# adapter: [ipmi|libvirt]
+adapter:
+
+# Node list.
+# Mandatory properties are id and role.
+# All other properties are adapter specific.
+# For Non-Fuel nodes controlled by:
+# - ipmi adapter you need to provide:
+# pxeMac
+# ipmiIp
+# ipmiUser
+# ipmiPass
+# - libvirt adapter you need to provide:
+# libvirtName: <whatever>
+# libvirtTemplate: [libvirt/vms/controller.xml | libvirt/vms/compute.xml]
+#
+# For the Fuel Node you need to provide:
+# libvirtName: <whatever>
+# libvirtTemplate: libvirt/vms/fuel.xml
+# isFuel: yes
+# username: root
+# password: r00tme
+'''
+
+DHA_2 = '''
+# Adding the Fuel node as node id {node_id}
+# which may not be correct - please adjust as needed.
+'''
+
+DISKS = {'fuel': '30G',
+ 'controller': '30G',
+ 'compute': '30G'}
+
+
+class Reap(object):
+
+ def __init__(self, dea_file, dha_file, comment):
+ self.dea_file = dea_file
+ self.dha_file = dha_file
+ self.comment = comment
+ self.temp_dir = None
+ self.env = None
+ self.env_id = None
+ self.last_node = None
+
+ def get_env(self):
+ env_list = parse(exec_cmd('fuel env'))
+ if len(env_list) != 1:
+ err('Not exactly one environment')
+ self.env = env_list[0]
+ self.env_id = self.env[E['id']]
+
+ def download_config(self, config_type):
+ log('Download %s config for environment %s'
+ % (config_type, self.env_id))
+ exec_cmd('fuel %s --env %s --download --dir %s'
+ % (config_type, self.env_id, self.temp_dir))
+
+ def write(self, file, text, newline=True):
+ mode = 'a' if os.path.isfile(file) else 'w'
+ with open(file, mode) as f:
+ f.write('%s%s' % (text, ('\n' if newline else '')))
+
+ def write_yaml(self, file, data, newline=True):
+ self.write(file, yaml.dump(data, default_flow_style=False).strip(),
+ newline)
+
+ def get_node_by_id(self, node_list, node_id):
+ for node in node_list:
+ if node[N['id']] == node_id:
+ return node
+
+ def reap_interface(self, node_id, interfaces):
+ interface, mac = self.get_interface(node_id)
+ if_name = None
+ if interfaces:
+ if_name = self.check_dict_exists(interfaces, interface)
+ if not if_name:
+ if_name = 'interfaces_%s' % str(len(interfaces) + 1)
+ interfaces[if_name] = interface
+ return if_name, mac
+
+ def reap_transformation(self, node_id, roles, transformations):
+ main_role = 'controller' if 'controller' in roles else 'compute'
+ node_file = glob.glob('%s/deployment_%s/*%s_%s.yaml'
+ % (self.temp_dir, self.env_id,
+ main_role, node_id))
+ tr_name = None
+ with open(node_file[0]) as f:
+ node_config = yaml.load(f)
+ transformation = {'transformations':
+ node_config['network_scheme']['transformations']}
+ if transformations:
+ tr_name = self.check_dict_exists(transformations, transformation)
+ if not tr_name:
+ tr_name = 'transformations_%s' % str(len(transformations) + 1)
+ transformations[tr_name] = transformation
+ return tr_name
+
+ def check_dict_exists(self, main_dict, dict):
+ for key, val in main_dict.iteritems():
+ if cmp(dict, val) == 0:
+ return key
+
+ def reap_nodes_interfaces_transformations(self):
+ node_list = parse(exec_cmd('fuel node'))
+ real_node_ids = [node[N['id']] for node in node_list]
+ real_node_ids.sort()
+ min_node = real_node_ids[0]
+
+ interfaces = {}
+ transformations = {}
+ dea_nodes = []
+ dha_nodes = []
+
+ for real_node_id in real_node_ids:
+ node_id = int(real_node_id) - int(min_node) + 1
+ self.last_node = node_id
+ node = self.get_node_by_id(node_list, real_node_id)
+ roles = commafy(node[N['roles']])
+ if not roles:
+ err('Fuel Node %s has no role' % real_node_id)
+ dea_node = {'id': node_id,
+ 'role': roles}
+ dha_node = {'id': node_id}
+ if_name, mac = self.reap_interface(real_node_id, interfaces)
+ tr_name = self.reap_transformation(real_node_id, roles,
+ transformations)
+ dea_node.update(
+ {'interfaces': if_name,
+ 'transformations': tr_name})
+
+ dha_node.update(
+ {'pxeMac': mac if mac else None,
+ 'ipmiIp': None,
+ 'ipmiUser': None,
+ 'ipmiPass': None,
+ 'libvirtName': None,
+ 'libvirtTemplate': None})
+
+ dea_nodes.append(dea_node)
+ dha_nodes.append(dha_node)
+
+ self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False)
+ self.write_yaml(self.dea_file, {'nodes': dea_nodes})
+ self.write_yaml(self.dea_file, interfaces)
+ self.write_yaml(self.dea_file, transformations)
+ self.reap_fuel_node_info()
+ self.write_yaml(self.dha_file, {'disks': DISKS})
+
+ def reap_fuel_node_info(self):
+ dha_nodes = []
+ dha_node = {
+ 'id': self.last_node + 1,
+ 'libvirtName': None,
+ 'libvirtTemplate': None,
+ 'isFuel': True,
+ 'username': 'root',
+ 'password': 'r00tme'}
+
+ dha_nodes.append(dha_node)
+
+ self.write(self.dha_file, DHA_2.format(node_id=dha_node['id']), False)
+ self.write_yaml(self.dha_file, dha_nodes)
+
+ def reap_environment_info(self):
+ network_file = ('%s/network_%s.yaml'
+ % (self.temp_dir, self.env_id))
+ network = self.read_yaml(network_file)
+ env = {'environment':
+ {'name': self.env[E['name']],
+ 'mode': self.env[E['mode']],
+ 'net_segment_type':
+ network['networking_parameters']['segmentation_type']}}
+ self.write_yaml(self.dea_file, env)
+ wanted_release = None
+ rel_list = parse(exec_cmd('fuel release'))
+ for rel in rel_list:
+ if rel[R['id']] == self.env[E['release_id']]:
+ wanted_release = rel[R['name']]
+ self.write_yaml(self.dea_file, {'wanted_release': wanted_release})
+
+ def reap_fuel_settings(self):
+ data = self.read_yaml('/etc/fuel/astute.yaml')
+ fuel = {}
+ del data['ADMIN_NETWORK']['mac']
+ del data['ADMIN_NETWORK']['interface']
+ for key in ['ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH',
+ 'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS']:
+ fuel[key] = data[key]
+ for key in fuel['ADMIN_NETWORK'].keys():
+ if key not in ['ipaddress', 'netmask',
+ 'dhcp_pool_start', 'dhcp_pool_end']:
+ del fuel['ADMIN_NETWORK'][key]
+ self.write_yaml(self.dea_file, {'fuel': fuel})
+
+ def reap_network_settings(self):
+ network_file = ('%s/network_%s.yaml'
+ % (self.temp_dir, self.env_id))
+ data = self.read_yaml(network_file)
+ network = {}
+ network['networking_parameters'] = data['networking_parameters']
+ network['networks'] = data['networks']
+ for net in network['networks']:
+ del net['id']
+ del net['group_id']
+ self.write_yaml(self.dea_file, {'network': network})
+
+ def reap_settings(self):
+ settings_file = '%s/settings_%s.yaml' % (self.temp_dir, self.env_id)
+ settings = self.read_yaml(settings_file)
+ self.write_yaml(self.dea_file, {'settings': settings})
+
+ def get_interface(self, real_node_id):
+ exec_cmd('fuel node --node-id %s --network --download --dir %s'
+ % (real_node_id, self.temp_dir))
+ interface_file = ('%s/node_%s/interfaces.yaml'
+ % (self.temp_dir, real_node_id))
+ interfaces = self.read_yaml(interface_file)
+ interface_config = {}
+ pxe_mac = None
+ for interface in interfaces:
+ networks = []
+ for network in interface['assigned_networks']:
+ networks.append(network['name'])
+ if network['name'] == 'fuelweb_admin':
+ pxe_mac = interface['mac']
+ if networks:
+ interface_config[interface['name']] = networks
+ return interface_config, pxe_mac
+
+ def read_yaml(self, yaml_file):
+ with open(yaml_file) as f:
+ data = yaml.load(f)
+ return data
+
+ def intro(self):
+ delete(self.dea_file)
+ delete(self.dha_file)
+ self.temp_dir = exec_cmd('mktemp -d')
+ date = time.strftime('%c')
+ self.write(self.dea_file,
+ DEA_1.format(date=date, comment=self.comment), False)
+ self.write(self.dha_file,
+ DHA_1.format(date=date, comment=self.comment))
+ self.get_env()
+ self.download_config('deployment')
+ self.download_config('settings')
+ self.download_config('network')
+
+ def finale(self):
+ log('DEA file is available at %s' % self.dea_file)
+ log('DHA file is available at %s (this is just a template)'
+ % self.dha_file)
+ shutil.rmtree(self.temp_dir)
+
+ def reap(self):
+ self.intro()
+ self.reap_environment_info()
+ self.reap_nodes_interfaces_transformations()
+ self.reap_fuel_settings()
+ self.reap_network_settings()
+ self.reap_settings()
+ self.finale()
+
+
+def usage():
+ print '''
+ Usage:
+ python reap.py <dea_file> <dha_file> <comment>
+ '''
+
+
+def parse_arguments():
+ parser = ArgParser(prog='python %s' % __file__)
+ parser.add_argument('dea_file', nargs='?', action='store',
+ default='dea.yaml',
+ help='Deployment Environment Adapter: dea.yaml')
+ parser.add_argument('dha_file', nargs='?', action='store',
+ default='dha.yaml',
+ help='Deployment Hardware Adapter: dha.yaml')
+ parser.add_argument('comment', nargs='?', action='store', help='Comment')
+ args = parser.parse_args()
+ return (args.dea_file, args.dha_file, args.comment)
+
+
+def main():
+ dea_file, dha_file, comment = parse_arguments()
+
+ r = Reap(dea_file, dha_file, comment)
+ r.reap()
+
+
+if __name__ == '__main__':
+ main()
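
The mechanism worth noting in reap.py is check_dict_exists: identical
interface and transformation blocks are written to the DEA file once, under a
generated key, and every matching node then references that key. The same idea
in isolation (intern_config is an illustrative name):

    def intern_config(registry, config, prefix):
        # Reuse the key of an identical config, else register a new one.
        for key, val in registry.items():
            if val == config:
                return key
        key = '%s_%s' % (prefix, len(registry) + 1)
        registry[key] = config
        return key

    registry = {}
    intern_config(registry, {'eth0': ['fuelweb_admin']}, 'interfaces')
    # A second node with the same layout maps to 'interfaces_1' again:
    intern_config(registry, {'eth0': ['fuelweb_admin']}, 'interfaces')
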
diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py
deleted file mode 100644
index 4e0e7ba37..000000000
--- a/fuel/deploy/setup_environment.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import sys
-from lxml import etree
-import os
-import glob
-import common
-
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-
-class LibvirtEnvironment(object):
-
- def __init__(self, storage_dir, dha_file):
- self.dha = DeploymentHardwareAdapter(dha_file)
- self.storage_dir = storage_dir
- self.parser = etree.XMLParser(remove_blank_text=True)
- self.file_dir = os.path.dirname(os.path.realpath(__file__))
- self.network_dir = '%s/libvirt/networks' % self.file_dir
- self.vm_dir = '%s/libvirt/vms' % self.file_dir
- self.node_ids = self.dha.get_all_node_ids()
- self.fuel_node_id = self.dha.get_fuel_node_id()
- self.net_names = self.collect_net_names()
-
- def create_storage(self, node_id, disk_path, disk_sizes):
- if node_id == self.fuel_node_id:
- disk_size = disk_sizes['fuel']
- else:
- role = self.dha.get_node_role(node_id)
- disk_size = disk_sizes[role]
- exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
-
- def create_vms(self):
- temp_dir = exec_cmd('mktemp -d')
- disk_sizes = self.dha.get_disks()
- for node_id in self.node_ids:
- vm_name = self.dha.get_node_property(node_id, 'libvirtName')
- vm_template = self.dha.get_node_property(node_id,
- 'libvirtTemplate')
- disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
- self.create_storage(node_id, disk_path, disk_sizes)
- self.define_vm(vm_name, vm_template, temp_dir, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
-
- def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
- log('Creating VM %s with disks %s' % (vm_name, disk_path))
- temp_vm_file = '%s/%s' % (temp_dir, vm_name)
- exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
- with open(temp_vm_file) as f:
- vm_xml = etree.parse(f)
- names = vm_xml.xpath('/domain/name')
- for name in names:
- name.text = vm_name
- uuids = vm_xml.xpath('/domain/uuid')
- for uuid in uuids:
- uuid.getparent().remove(uuid)
- disks = vm_xml.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source.set('file', disk_path)
- with open(temp_vm_file, 'w') as f:
- vm_xml.write(f, pretty_print=True, xml_declaration=True)
- exec_cmd('virsh define %s' % temp_vm_file)
-
- def create_networks(self):
- for net_file in glob.glob('%s/*' % self.network_dir):
- exec_cmd('virsh net-define %s' % net_file)
- for net in self.net_names:
- log('Creating network %s' % net)
- exec_cmd('virsh net-autostart %s' % net)
- exec_cmd('virsh net-start %s' % net)
-
- def delete_networks(self):
- for net in self.net_names:
- log('Deleting network %s' % net)
- exec_cmd('virsh net-destroy %s' % net, False)
- exec_cmd('virsh net-undefine %s' % net, False)
-
- def get_net_name(self, net_file):
- with open(net_file) as f:
- net_xml = etree.parse(f)
- name_list = net_xml.xpath('/network/name')
- for name in name_list:
- net_name = name.text
- return net_name
-
- def collect_net_names(self):
- net_list = []
- for net_file in glob.glob('%s/*' % self.network_dir):
- name = self.get_net_name(net_file)
- net_list.append(name)
- return net_list
-
- def delete_vms(self):
- for node_id in self.node_ids:
- vm_name = self.dha.get_node_property(node_id, 'libvirtName')
- r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
- if c > 0:
- log(r)
- continue
- self.undefine_vm_delete_disk(r, vm_name)
-
- def undefine_vm_delete_disk(self, printout, vm_name):
- disk_files = []
- xml_dump = etree.fromstring(printout, self.parser)
- disks = xml_dump.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source_file = source.get('file')
- if source_file:
- disk_files.append(source_file)
- log('Deleting VM %s with disks %s' % (vm_name, disk_files))
- exec_cmd('virsh destroy %s' % vm_name, False)
- exec_cmd('virsh undefine %s' % vm_name, False)
- for file in disk_files:
- exec_cmd('rm -f %s' % file)
-
- def setup_environment(self):
- check_if_root()
- check_dir_exists(self.network_dir)
- check_dir_exists(self.vm_dir)
- self.cleanup_environment()
- self.create_vms()
- self.create_networks()
-
- def cleanup_environment(self):
- self.delete_vms()
- self.delete_networks()
-
-
-def usage():
- print '''
- Usage:
- python setup_environment.py <storage_directory> <dha_file>
-
- Example:
- python setup_environment.py /mnt/images dha.yaml
- '''
-
-def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- storage_dir = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_dir_exists(storage_dir)
- check_file_exists(dha_file)
- return storage_dir, dha_file
-
-def main():
- storage_dir, dha_file = parse_arguments()
-
- virt = LibvirtEnvironment(storage_dir, dha_file)
- virt.setup_environment()
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py
deleted file mode 100644
index 65ee01341..000000000
--- a/fuel/deploy/setup_vfuel.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import sys
-from lxml import etree
-import os
-
-import common
-from dha import DeploymentHardwareAdapter
-
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-
-VFUELNET = '''
-iface vfuelnet inet static
- bridge_ports em1
- address 10.40.0.1
- netmask 255.255.255.0
- pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
- post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet"
- post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
-'''
-VM_DIR = 'baremetal/vm'
-FUEL_DISK_SIZE = '30G'
-IFACE = 'vfuelnet'
-INTERFACE_CONFIG = '/etc/network/interfaces'
-
-class VFuel(object):
-
- def __init__(self, storage_dir, dha_file):
- self.dha = DeploymentHardwareAdapter(dha_file)
- self.storage_dir = storage_dir
- self.parser = etree.XMLParser(remove_blank_text=True)
- self.fuel_node_id = self.dha.get_fuel_node_id()
- self.file_dir = os.path.dirname(os.path.realpath(__file__))
- self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR)
-
- def setup_environment(self):
- check_if_root()
- check_dir_exists(self.vm_dir)
- self.setup_networking()
- self.delete_vm()
- self.create_vm()
-
- def setup_networking(self):
- with open(INTERFACE_CONFIG) as f:
- data = f.read()
- if VFUELNET not in data:
- log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET))
- with open(INTERFACE_CONFIG, 'a') as f:
- f.write('\n%s\n' % VFUELNET)
- if exec_cmd('ip link show | grep %s' % IFACE):
- log('Bring DOWN interface %s' % IFACE)
- exec_cmd('ifdown %s' % IFACE, False)
- log('Bring UP interface %s' % IFACE)
- exec_cmd('ifup %s' % IFACE, False)
-
- def delete_vm(self):
- vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
- r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
- if c > 0:
- log(r)
- return
- self.undefine_vm_delete_disk(r, vm_name)
-
- def undefine_vm_delete_disk(self, printout, vm_name):
- disk_files = []
- xml_dump = etree.fromstring(printout, self.parser)
- disks = xml_dump.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source_file = source.get('file')
- if source_file:
- disk_files.append(source_file)
- log('Deleting VM %s with disks %s' % (vm_name, disk_files))
- exec_cmd('virsh destroy %s' % vm_name, False)
- exec_cmd('virsh undefine %s' % vm_name, False)
- for file in disk_files:
- exec_cmd('rm -f %s' % file)
-
- def create_vm(self):
- temp_dir = exec_cmd('mktemp -d')
- vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
- vm_template = self.dha.get_node_property(self.fuel_node_id,
- 'libvirtTemplate')
- disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
- exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
- self.define_vm(vm_name, vm_template, temp_dir, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
-
- def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
- log('Creating VM %s with disks %s' % (vm_name, disk_path))
- temp_vm_file = '%s/%s' % (temp_dir, vm_name)
- exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
- with open(temp_vm_file) as f:
- vm_xml = etree.parse(f)
- names = vm_xml.xpath('/domain/name')
- for name in names:
- name.text = vm_name
- uuids = vm_xml.xpath('/domain/uuid')
- for uuid in uuids:
- uuid.getparent().remove(uuid)
- disks = vm_xml.xpath('/domain/devices/disk')
- for disk in disks:
- sources = disk.xpath('source')
- for source in sources:
- source.set('file', disk_path)
- with open(temp_vm_file, 'w') as f:
- vm_xml.write(f, pretty_print=True, xml_declaration=True)
- exec_cmd('virsh define %s' % temp_vm_file)
-
-
-def usage():
- print '''
- Usage:
- python setup_vfuel.py <storage_directory> <dha_file>
-
- Example:
- python setup_vfuel.py /mnt/images dha.yaml
- '''
-
-def parse_arguments():
- if len(sys.argv) != 3:
- log('Incorrect number of arguments')
- usage()
- sys.exit(1)
- storage_dir = sys.argv[-2]
- dha_file = sys.argv[-1]
- check_dir_exists(storage_dir)
- check_file_exists(dha_file)
- return storage_dir, dha_file
-
-def main():
- storage_dir, dha_file = parse_arguments()
-
- vfuel = VFuel(storage_dir, dha_file)
- vfuel.setup_environment()
-
-if __name__ == '__main__':
- main()
diff --git a/fuel/deploy/ssh_client.py b/fuel/deploy/ssh_client.py
index 9ea227aea..0f6b8c7ea 100644
--- a/fuel/deploy/ssh_client.py
+++ b/fuel/deploy/ssh_client.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import paramiko
import common
import scp
@@ -6,6 +16,7 @@ TIMEOUT = 600
log = common.log
err = common.err
+
class SSHClient(object):
def __init__(self, host, username, password):
@@ -18,7 +29,8 @@ class SSHClient(object):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(self.host, username=self.username,
- password=self.password, timeout=timeout)
+ password=self.password, look_for_keys=False,
+ timeout=timeout)
def close(self):
if self.client is not None:
@@ -32,7 +44,7 @@ class SSHClient(object):
def __exit__(self, type, value, traceback):
self.close()
- def exec_cmd(self, command, sudo=False, timeout=TIMEOUT, check=True):
+ def exec_cmd(self, command, check=True, sudo=False, timeout=TIMEOUT):
if sudo and self.username != 'root':
command = "sudo -S -p '' %s" % command
stdin, stdout, stderr = self.client.exec_command(command,
@@ -60,16 +72,15 @@ class SSHClient(object):
if chan.recv_ready():
data = chan.recv(1024)
while data:
- print data
+ log(data.strip())
data = chan.recv(1024)
if chan.recv_stderr_ready():
error_buff = chan.recv_stderr(1024)
while error_buff:
- print error_buff
+ log(error_buff.strip())
error_buff = chan.recv_stderr(1024)
- exit_status = chan.recv_exit_status()
- log('Exit status %s' % exit_status)
+ return chan.recv_exit_status()
def scp_get(self, remote, local='.', dir=False):
try:
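
The look_for_keys=False added to connect() deserves a note: by default
paramiko offers any local private keys before trying the password, which can
exhaust the server's authentication-attempt limit and make password logins
fail intermittently. A minimal standalone sketch of the hardened connect:

    import paramiko

    def connect(host, username, password, timeout=600):
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Password-only auth: do not try ~/.ssh keys first.
        client.connect(host, username=username, password=password,
                       look_for_keys=False, timeout=timeout)
        return client
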
diff --git a/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml
new file mode 100644
index 000000000..23b2809ae
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dea.yaml
@@ -0,0 +1,844 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version:
+created:
+comment: Config for Ericsson Montreal Lab - HA deployment with Ceph and Opendaylight
+environment:
+ name: opnfv
+ mode: ha
+ net_segment_type: gre
+wanted_release: Juno on Ubuntu 14.04.1
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 6
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 10.118.32.193
+ DNS_DOMAIN: opnfvericsson.ca
+ DNS_SEARCH: opnfvericsson.ca
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 10.118.34.219
+ NTP2:
+ NTP3:
+interfaces_1:
+ eth0:
+ - fuelweb_admin
+ eth2:
+ - public
+ - management
+ - storage
+ - private
+transformations_1:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-floating
+ provider: ovs
+ - action: add-patch
+ bridges:
+ - br-floating
+ - br-ex
+ mtu: 65000
+ provider: ovs
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth2.320
+ - action: add-port
+ bridge: br-storage
+ name: eth2.220
+ - action: add-port
+ bridge: br-mesh
+ name: eth2.20
+ - action: add-port
+ bridge: br-ex
+ name: eth0
+transformations_2:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth2.320
+ - action: add-port
+ bridge: br-storage
+ name: eth2.220
+ - action: add-port
+ bridge: br-mesh
+ name: eth2.20
+network:
+ management_vip: 192.168.0.2
+ management_vrouter_vip: 192.168.0.3
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 10.118.32.193
+ floating_ranges:
+ - - 10.118.34.226
+ - 10.118.34.230
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: gre
+ vlan_range:
+ - 2022
+ - 2023
+ networks:
+ - cidr: 10.118.34.192/24
+ gateway: 10.118.34.193
+ ip_ranges:
+ - - 10.118.34.220
+ - 10.118.34.225
+ meta:
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.2.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.2.2
+ - 192.168.2.254
+ meta:
+ assign_vip: 192.168.2.0/24
+ configurable: true
+ map_priority: 2
+ name: private
+ notation: cidr
+ render_addr_mask: private
+ render_type: cidr
+ seg_type: gre
+ use_gateway: false
+ vlan_start: 103
+ name: private
+ vlan_start: 20
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.1
+ - 192.168.0.254
+ meta:
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: 101
+ name: management
+ vlan_start: 320
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 220
+ - cidr: 10.20.0.0/16
+ gateway: 10.20.0.2
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+ public_vip: 10.118.34.220
+ public_vrouter_vip: 10.118.34.221
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: Email
+ regex:
+ error: Invalid email
+ source: ^\S+@\S+$
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: Tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: Username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ mongo:
+ description: If selected, you can use an external Mongo DB as the ceilometer backend
+ label: Use external Mongo DB
+ restrictions:
+ - settings:additional_components.ceilometer.value == false
+ type: checkbox
+ value: false
+ weight: 40
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: textarea
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ puppet_debug:
+ description: Debug puppet logging mode provides more information, but requires
+ more disk space.
+ label: Puppet debug logging
+ type: checkbox
+ value: true
+ weight: 20
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart will
+ not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ use_vcenter:
+ type: hidden
+ value: false
+ weight: 30
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set True only if multicast is configured correctly on router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ regex:
+ error: Invalid IP address list
+ source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
+ type: text
+ value: 10.118.32.193
+ weight: 10
+ metadata:
+ label: Host OS DNS Servers
+ weight: 90
+ external_mongo:
+ hosts_ip:
+ description: IP Addresses of MongoDB. Use comma to split IPs
+ label: MongoDB hosts IP
+ regex:
+ error: Invalid hosts ip sequence
+ source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
+ type: text
+ value: ''
+ weight: 30
+ metadata:
+ label: External MongoDB
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.mongo.value == false
+ weight: 20
+ mongo_db_name:
+ description: Mongo database name
+ label: Database name
+ regex:
+ error: Invalid database name
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ mongo_password:
+ description: Mongo database password
+ label: Password
+ regex:
+ error: Password contains spaces
+ source: ^\S*$
+ type: password
+ value: ceilometer
+ weight: 30
+ mongo_replset:
+ description: Name for Mongo replication set
+ label: Replset
+ type: text
+ value: ''
+ weight: 30
+ mongo_user:
+ description: Mongo database username
+ label: Username
+ regex:
+ error: Empty username
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ external_ntp:
+ metadata:
+ label: Host OS NTP Servers
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP server list
+ regex:
+ error: Invalid NTP server list
+ source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
+ type: text
+ value: 10.118.34.219
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
+ nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ murano_settings:
+ metadata:
+ label: Murano Settings
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.murano.value == false
+ weight: 20
+ murano_repo_url:
+ description: ''
+ label: Murano Repository URL
+ type: text
+ value: http://storage.apps.openstack.org/
+ weight: 10
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to support
+ networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
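
The use_vxlan and vni_range_* settings above presumably end up as Neutron's ML2 VXLAN VNI pool; assuming the OpenDaylight plugin renders them into ml2_conf.ini in the usual way, the result would look like what this sketch prints:

# Render the ML2 VXLAN section implied by the plugin settings above;
# that the plugin writes exactly this section is an assumption.
odl = {"use_vxlan": True, "vni_range_start": 10, "vni_range_end": 10000}

if odl["use_vxlan"]:
    print("[ml2_type_vxlan]")  # would sit alongside tenant_network_types = vxlan
    print("vni_ranges = %(vni_range_start)d:%(vni_range_end)d" % odl)
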
+ provision:
+ metadata:
+ label: Provision
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: image
+ values:
+ - data: image
+ description: Copying pre-built images onto disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: (DEPRECATED) Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ repo_setup:
+ metadata:
+ always_editable: true
+ label: Repositories
+ weight: 50
+ repos:
+ description: 'Please note: the first repository will be considered the operating
+ system mirror that will be used during node provisioning.
+
+ To create a local repository mirror on the Fuel master node, please follow
+ the instructions provided by running "fuel-createmirror --help" on the Fuel
+ master node.
+
+ Please make sure your Fuel master node has Internet access to the repository
+ before attempting to create a mirror.
+
+ For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
+
+ '
+ extra_priority: null
+ type: custom_repo_configuration
+ value:
+ - name: ubuntu
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-updates
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-security
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: mos
+ priority: 1050
+ section: main restricted
+ suite: mos6.1
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
+ - name: mos-updates
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-updates
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-security
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-security
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-holdback
+ priority: 1100
+ section: main restricted
+ suite: mos6.1-holdback
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: Auxiliary
+ priority: 1150
+ section: main restricted
+ suite: auxiliary
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
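
Each deb stanza above corresponds to one apt sources.list line of the form "deb <uri> <suite> <section>", and the priority field presumably becomes an apt pin; a sketch of the rendering under that assumption:

repos = [  # one stanza from the list above
    {"uri": "http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64",
     "suite": "mos6.1", "section": "main restricted", "priority": 1050},
]

for r in repos:
    print("deb %(uri)s %(suite)s %(section)s" % r)  # sources.list entry
    if r["priority"]:
        # apt_preferences(5) pin; that Fuel pins by suite is an assumption
        print("Package: *\nPin: release a=%(suite)s\nPin-Priority: %(priority)d" % r)
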
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works best
+ if Ceph is enabled for volumes and images, too. Enables live migration of
+ all types of Ceph backed VMs (without this option, live migration will only
+ work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ restrictions:
+ - settings:storage.images_vcenter.value == true: Only one Glance backend could
+ be selected.
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - action: hide
+ condition: settings:common.use_vcenter.value != true
+ - condition: settings:storage.images_ceph.value == true
+ message: Only one Glance backend could be selected.
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
+ will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage - Ceph
+ OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ type: text
+ value: '2'
+ weight: 85
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: It is recommended to have at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
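
The syslog_* fields presumably render into an rsyslog forwarding rule on every node, where a leading @@ selects TCP and a single @ UDP; a sketch of that rule (the server name is a placeholder, since the field above is empty):

syslog = {"server": "logs.example.com", "port": 514, "transport": "tcp"}

prefix = "@@" if syslog["transport"] == "tcp" else "@"  # rsyslog: @@=TCP, @=UDP
print("*.* %s%s:%d" % (prefix, syslog["server"], syslog["port"]))
# -> *.* @@logs.example.com:514
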
+ workloads_collector:
+ enabled:
+ type: hidden
+ value: true
+ metadata:
+ label: Workloads Collector User
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 10
+ password:
+ type: password
+ value: pBkLbu1k
+ tenant:
+ type: text
+ value: services
+ user:
+ type: text
+ value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml
new file mode 100644
index 000000000..ca446f680
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/dha.yaml
@@ -0,0 +1,54 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version:
+created:
+comment: Config for Ericsson Montreal Lab
+
+# Adapter to use for this definition
+adapter: hp
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 14:58:D0:54:7A:D8
+ ipmiIp: 10.118.32.198
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 2
+ pxeMac: 14:58:D0:55:E2:E0
+ ipmiIp: 10.118.32.202
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 3
+ pxeMac: 9C:B6:54:8A:25:C0
+ ipmiIp: 10.118.32.213
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 4
+ pxeMac: 14:58:D0:54:28:80
+ ipmiIp: 10.118.32.201
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 5
+ pxeMac: 14:58:D0:54:E7:88
+ ipmiIp: 10.118.32.203
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 6
+ pxeMac: 14:58:D0:54:7A:28
+ ipmiIp: 10.118.32.205
+ ipmiUser: <username>
+ ipmiPass: <password>
+# Adding the Fuel node as node id 7, which may not be correct - please
+# adjust as needed.
+- id: 7
+ libvirtName: fuel-opnfv
+ libvirtTemplate: templates/hardware_environment/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 50G \ No newline at end of file
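
The hp adapter drives these nodes out-of-band via the ipmiIp/ipmiUser/ipmiPass properties (the credentials above are placeholders). A minimal sketch of PXE-booting every non-Fuel node with ipmitool; the lanplus transport and the exact command sequence are assumptions about what the adapter does:

import subprocess
import yaml

def ipmi(node, *args):
    # One ipmitool invocation against the node's BMC.
    subprocess.check_call(["ipmitool", "-I", "lanplus",
                           "-H", node["ipmiIp"],
                           "-U", node["ipmiUser"],
                           "-P", node["ipmiPass"]] + list(args))

with open("dha.yaml") as f:
    dha = yaml.safe_load(f)

for node in dha["nodes"]:
    if node.get("isFuel"):
        continue  # the Fuel master is a libvirt VM, not an IPMI target
    ipmi(node, "chassis", "bootdev", "pxe")  # network-boot on next start
    ipmi(node, "chassis", "power", "cycle")  # power-cycle into PXE
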
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml
new file mode 100644
index 000000000..db29fe977
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dea.yaml
@@ -0,0 +1,841 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version:
+created:
+comment: Config for LF POD1 - HA deployment with Ceph and Opendaylight
+environment:
+ name: opnfv
+ mode: ha
+ net_segment_type: gre
+wanted_release: Juno on Ubuntu 14.04.1
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 8.8.8.8
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+interfaces_1:
+ eth0:
+ - public
+ eth1:
+ - fuelweb_admin
+ - management
+ - storage
+ - private
+transformations_1:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-floating
+ provider: ovs
+ - action: add-patch
+ bridges:
+ - br-floating
+ - br-ex
+ mtu: 65000
+ provider: ovs
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth1.300
+ - action: add-port
+ bridge: br-storage
+ name: eth1.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth1.302
+ - action: add-port
+ bridge: br-ex
+ name: eth0
+transformations_2:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth1.300
+ - action: add-port
+ bridge: br-storage
+ name: eth1.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth1.302
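
Each transformation entry is an action in Fuel's L23 network scheme: add-br creates a bridge (Open vSwitch when provider: ovs, otherwise a Linux bridge) and add-port attaches an interface, with names like eth1.302 implying an 802.1q sub-interface on VLAN 302. A rough sketch that prints the node-side commands such a scheme implies (the command mapping is an approximation, not the deployer's actual code):

def render(actions):
    for a in actions:
        if a["action"] == "add-br":
            tool = "ovs-vsctl add-br" if a.get("provider") == "ovs" else "brctl addbr"
            yield "%s %s" % (tool, a["name"])
        elif a["action"] == "add-port":
            name = a["name"]
            if "." in name:  # eth1.302 -> VLAN 302 sub-interface of eth1
                parent, vid = name.split(".")
                yield "ip link add link %s name %s type vlan id %s" % (parent, name, vid)
            yield "brctl addif %s %s" % (a["bridge"], name)

scheme = [  # trimmed from transformations_2 above
    {"action": "add-br", "name": "br-mesh"},
    {"action": "add-port", "bridge": "br-mesh", "name": "eth1.302"},
]
for cmd in render(scheme):
    print(cmd)
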
+network:
+ management_vip: 192.168.0.2
+ management_vrouter_vip: 192.168.0.3
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.30.9.160
+ - 172.30.9.254
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: gre
+ vlan_range:
+ - 1000
+ - 1030
+ networks:
+ - cidr: 172.30.9.0/24
+ gateway: 172.30.9.1
+ ip_ranges:
+ - - 172.30.9.64
+ - 172.30.9.159
+ meta:
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.2.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.2.2
+ - 192.168.2.254
+ meta:
+ assign_vip: 192.168.2.0/24
+ configurable: true
+ map_priority: 2
+ name: private
+ notation: cidr
+ render_addr_mask: private
+ render_type: cidr
+ seg_type: gre
+ use_gateway: false
+ vlan_start: 103
+ name: private
+ vlan_start: 302
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: 101
+ name: management
+ vlan_start: 300
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
+ meta:
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 301
+ - cidr: 10.20.0.0/16
+ gateway: 10.20.0.2
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+ public_vip: 172.30.9.64
+ public_vrouter_vip: 172.30.9.65
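
A sanity check worth running over the network block above is that every ip_range sits inside its network's cidr, e.g. the public range 172.30.9.64-172.30.9.159 inside 172.30.9.0/24; a sketch using the standard ipaddress module:

import ipaddress

networks = [  # (name, cidr, ip_range) taken from the block above
    ("public",  "172.30.9.0/24",  ("172.30.9.64", "172.30.9.159")),
    ("storage", "192.168.1.0/24", ("192.168.1.2", "192.168.1.254")),
]

for name, cidr, (lo, hi) in networks:
    net = ipaddress.ip_network(cidr)
    ok = all(ipaddress.ip_address(a) in net for a in (lo, hi))
    print("%-8s %s-%s in %s: %s" % (name, lo, hi, cidr, "ok" if ok else "BAD"))
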
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: Email
+ regex:
+ error: Invalid email
+ source: ^\S+@\S+$
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: Tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: Username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ mongo:
+ description: If selected, you can use an external MongoDB as the Ceilometer backend
+ label: Use external MongoDB
+ restrictions:
+ - settings:additional_components.ceilometer.value == false
+ type: checkbox
+ value: false
+ weight: 40
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: textarea
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware.
+ label: KVM
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ puppet_debug:
+ description: Debug puppet logging mode provides more information, but requires
+ more disk space.
+ label: Puppet debug logging
+ type: checkbox
+ value: true
+ weight: 20
+ resume_guests_state_on_host_boot:
+ description: Whether to resume previous guests state when the host reboots.
+ If enabled, this option causes guests assigned to the host to resume their
+ previous state. If the guest was running a restart will be attempted when
+ nova-compute starts. If the guest was not running previously, a restart will
+ not be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ use_vcenter:
+ type: hidden
+ value: false
+ weight: 30
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+ description: Set to true only if multicast is configured correctly on the router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
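
The group/port pair is the multicast address and port presumably templated into corosync.conf's totem interface stanza; a sketch of that stanza under the assumption of a single-ring setup:

group, port = "226.94.1.1", 12000  # values from the block above
print("interface {")
print("  ringnumber: 0")           # single-ring setup assumed
print("  mcastaddr: %s" % group)
print("  mcastport: %d" % port)
print("}")
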
+ external_dns:
+ dns_list:
+ description: List of upstream DNS servers, separated by comma
+ label: DNS list
+ regex:
+ error: Invalid IP address list
+ source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
+ type: text
+ value: 8.8.4.4, 8.8.8.8
+ weight: 10
+ metadata:
+ label: Host OS DNS Servers
+ weight: 90
+ external_mongo:
+ hosts_ip:
+ description: IP addresses of MongoDB hosts, separated by comma
+ label: MongoDB hosts IP
+ regex:
+ error: Invalid hosts IP sequence
+ source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
+ type: text
+ value: ''
+ weight: 30
+ metadata:
+ label: External MongoDB
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.mongo.value == false
+ weight: 20
+ mongo_db_name:
+ description: Mongo database name
+ label: Database name
+ regex:
+ error: Invalid database name
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ mongo_password:
+ description: Mongo database password
+ label: Password
+ regex:
+ error: Password contains spaces
+ source: ^\S*$
+ type: password
+ value: ceilometer
+ weight: 30
+ mongo_replset:
+ description: Name for Mongo replication set
+ label: Replset
+ type: text
+ value: ''
+ weight: 30
+ mongo_user:
+ description: Mongo database username
+ label: Username
+ regex:
+ error: Empty username
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ external_ntp:
+ metadata:
+ label: Host OS NTP Servers
+ weight: 100
+ ntp_list:
+ description: List of upstream NTP servers, separated by comma
+ label: NTP server list
+ regex:
+ error: Invalid NTP server list
+ source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
+ nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ murano_settings:
+ metadata:
+ label: Murano Settings
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.murano.value == false
+ weight: 20
+ murano_repo_url:
+ description: ''
+ label: Murano Repository URL
+ type: text
+ value: http://storage.apps.openstack.org/
+ weight: 10
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to support
+ networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+ description: Note that one virtual function will be reserved to the storage
+ network, in case of choosing iSER.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
+ provision:
+ metadata:
+ label: Provision
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: image
+ values:
+ - data: image
+ description: Copying pre-built images onto disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: (DEPRECATED) Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+ description: When disabled, public network will be assigned to controllers only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ repo_setup:
+ metadata:
+ always_editable: true
+ label: Repositories
+ weight: 50
+ repos:
+ description: 'Please note: the first repository will be considered the operating
+ system mirror that will be used during node provisioning.
+
+ To create a local repository mirror on the Fuel master node, please follow
+ the instructions provided by running "fuel-createmirror --help" on the Fuel
+ master node.
+
+ Please make sure your Fuel master node has Internet access to the repository
+ before attempting to create a mirror.
+
+ For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
+
+ '
+ extra_priority: null
+ type: custom_repo_configuration
+ value:
+ - name: ubuntu
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-updates
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-security
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: mos
+ priority: 1050
+ section: main restricted
+ suite: mos6.1
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
+ - name: mos-updates
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-updates
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-security
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-security
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-holdback
+ priority: 1100
+ section: main restricted
+ suite: mos6.1-holdback
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: Auxiliary
+ priority: 1150
+ section: main restricted
+ suite: auxiliary
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works best
+ if Ceph is enabled for volumes and images, too. Enables live migration of
+ all types of Ceph backed VMs (without this option, live migration will only
+ work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ restrictions:
+ - settings:storage.images_vcenter.value == true: Only one Glance backend could
+ be selected.
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - action: hide
+ condition: settings:common.use_vcenter.value != true
+ - condition: settings:storage.images_ceph.value == true
+ message: Only one Glance backend could be selected.
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
+ will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage - Ceph
+ OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ type: text
+ value: '2'
+ weight: 85
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: It is recommended to have at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
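
With osd_pool_size 2 and the five ceph-osd nodes declared at the top of this file, the constraint in the description holds, and usable Ceph capacity is raw capacity divided by the replication factor; a quick arithmetic check (the raw capacity figure is hypothetical):

osd_nodes, replicas = 5, 2        # node list and osd_pool_size above
assert replicas <= osd_nodes      # Fuel's stated constraint
raw_tb = 10.0                     # hypothetical raw Ceph capacity, TB
print("usable ~ %.1f TB" % (raw_tb / replicas))  # -> usable ~ 5.0 TB
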
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ workloads_collector:
+ enabled:
+ type: hidden
+ value: true
+ metadata:
+ label: Workloads Collector User
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 10
+ password:
+ type: password
+ value: pBkLbu1k
+ tenant:
+ type: text
+ value: services
+ user:
+ type: text
+ value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml
new file mode 100644
index 000000000..724d6d833
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod1/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version:
+created:
+comment: Config for LF POD1
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory property is id, all other properties are adapter specific.
+
+nodes:
+- id: 1
+ pxeMac: 00:25:b5:b0:00:ef
+ ipmiIp: 172.30.8.69
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 2
+ pxeMac: 00:25:b5:b0:00:cf
+ ipmiIp: 172.30.8.78
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 3
+ pxeMac: 00:25:b5:b0:00:8f
+ ipmiIp: 172.30.8.68
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 4
+ pxeMac: 00:25:b5:b0:00:6f
+ ipmiIp: 172.30.8.77
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 5
+ pxeMac: 00:25:b5:b0:00:4f
+ ipmiIp: 172.30.8.67
+ ipmiUser: admin
+ ipmiPass: octopus
+# Adding the Fuel node as node id 6, which may not be correct - please
+# adjust as needed.
+- id: 6
+ libvirtName: fuel-opnfv
+ libvirtTemplate: templates/hardware_environment/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 50G \ No newline at end of file
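
Unlike the IPMI-driven nodes, the isFuel entry points at a libvirt template, so the deployer presumably creates the Fuel master VM locally; a sketch of what that could look like (the image path and the exact qemu-img/virsh invocations are assumptions):

import subprocess

fuel = {"name": "fuel-opnfv",
        "template": "templates/hardware_environment/vms/fuel.xml",
        "disk": "50G"}  # libvirtName / libvirtTemplate / disks.fuel above

# Back the VM with a qcow2 volume sized from disks.fuel, then define and
# start it from the XML template.
subprocess.check_call(["qemu-img", "create", "-f", "qcow2",
                       "/var/lib/libvirt/images/%s.qcow2" % fuel["name"],
                       fuel["disk"]])
subprocess.check_call(["virsh", "define", fuel["template"]])
subprocess.check_call(["virsh", "start", fuel["name"]])
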
diff --git a/fuel/deploy/libvirt/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dea.yaml
index 802293f62..81cbcbf5f 100644
--- a/fuel/deploy/libvirt/dea.yaml
+++ b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dea.yaml
@@ -1,40 +1,38 @@
title: Deployment Environment Adapter (DEA)
# DEA API version supported
-version: 1.1
-created: Sat Apr 25 16:26:22 UTC 2015
-comment: Small libvirt setup
-environment_name: opnfv59-b
-environment_mode: multinode
-wanted_release: Juno on Ubuntu 12.04.4
+version:
+created:
+comment: Config for LF POD2 - HA deployment with Ceph and Opendaylight
+environment:
+ name: opnfv
+ mode: ha
+ net_segment_type: gre
+wanted_release: Juno on Ubuntu 14.04.1
nodes:
- id: 1
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
- id: 2
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
- id: 3
- interfaces: interface1
- transformations: controller1
- role: controller
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
- id: 4
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
- id: 5
- interfaces: interface1
- transformations: compute1
- role: compute
-- id: 6
- interfaces: interface1
- transformations: compute1
- role: compute
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
fuel:
ADMIN_NETWORK:
ipaddress: 10.20.0.2
- netmask: 255.255.255.0
+ netmask: 255.255.0.0
dhcp_pool_start: 10.20.0.3
dhcp_pool_end: 10.20.0.254
DNS_UPSTREAM: 8.8.8.8
@@ -43,178 +41,104 @@ fuel:
FUEL_ACCESS:
user: admin
password: admin
- HOSTNAME: opnfv59
+ HOSTNAME: opnfv
NTP1: 0.pool.ntp.org
NTP2: 1.pool.ntp.org
NTP3: 2.pool.ntp.org
-interfaces:
- interface1:
- eth0:
- - fuelweb_admin
- - management
- eth1:
- - storage
- eth2:
- - private
- eth3:
- - public
-transformations:
- controller1:
- - action: add-br
- name: br-eth0
- - action: add-port
- bridge: br-eth0
- name: eth0
- - action: add-br
- name: br-eth1
- - action: add-port
- bridge: br-eth1
- name: eth1
- - action: add-br
- name: br-eth2
- - action: add-port
- bridge: br-eth2
- name: eth2
- - action: add-br
- name: br-eth3
- - action: add-port
- bridge: br-eth3
- name: eth3
- - action: add-br
- name: br-ex
- - action: add-br
- name: br-mgmt
- - action: add-br
- name: br-storage
- - action: add-br
- name: br-fw-admin
- - action: add-patch
- bridges:
- - br-eth1
- - br-storage
- tags:
- - 102
- - 0
- vlan_ids:
- - 102
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-mgmt
- tags:
- - 101
- - 0
- vlan_ids:
- - 101
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-fw-admin
- trunks:
- - 0
- - action: add-patch
- bridges:
- - br-eth3
- - br-ex
- trunks:
- - 0
- - action: add-br
- name: br-prv
- - action: add-patch
- bridges:
- - br-eth2
- - br-prv
- compute1:
- - action: add-br
- name: br-eth0
- - action: add-port
- bridge: br-eth0
- name: eth0
- - action: add-br
- name: br-eth1
- - action: add-port
- bridge: br-eth1
- name: eth1
- - action: add-br
- name: br-eth2
- - action: add-port
- bridge: br-eth2
- name: eth2
- - action: add-br
- name: br-eth3
- - action: add-port
- bridge: br-eth3
- name: eth3
- - action: add-br
- name: br-mgmt
- - action: add-br
- name: br-storage
- - action: add-br
- name: br-fw-admin
- - action: add-patch
- bridges:
- - br-eth1
- - br-storage
- tags:
- - 102
- - 0
- vlan_ids:
- - 102
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-mgmt
- tags:
- - 101
- - 0
- vlan_ids:
- - 101
- - 0
- - action: add-patch
- bridges:
- - br-eth0
- - br-fw-admin
- trunks:
- - 0
- - action: add-br
- name: br-prv
- - action: add-patch
- bridges:
- - br-eth2
- - br-prv
-opnfv:
- compute: {}
- controller: {}
+interfaces_1:
+ eth0:
+ - fuelweb_admin
+ - management
+ - storage
+ - private
+ eth2:
+ - public
+transformations_1:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-floating
+ provider: ovs
+ - action: add-patch
+ bridges:
+ - br-floating
+ - br-ex
+ mtu: 65000
+ provider: ovs
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth0.300
+ - action: add-port
+ bridge: br-storage
+ name: eth0.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth0.302
+ - action: add-port
+ bridge: br-ex
+ name: eth2
+transformations_2:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth0.300
+ - action: add-port
+ bridge: br-storage
+ name: eth0.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth0.302
network:
+ management_vip: 192.168.1.2
+ management_vrouter_vip: 192.168.1.3
networking_parameters:
base_mac: fa:16:3e:00:00:00
dns_nameservers:
- 8.8.4.4
- 8.8.8.8
floating_ranges:
- - - 172.16.0.130
- - 172.16.0.254
+ - - 172.30.10.160
+ - 172.30.10.254
gre_id_range:
- 2
- 65535
internal_cidr: 192.168.111.0/24
internal_gateway: 192.168.111.1
net_l23_provider: ovs
- segmentation_type: vlan
+ segmentation_type: gre
vlan_range:
- 1000
- 1030
networks:
- - cidr: 172.16.0.0/24
- gateway: 172.16.0.1
+ - cidr: 172.30.10.0/24
+ gateway: 172.30.10.1
ip_ranges:
- - - 172.16.0.2
- - 172.16.0.126
+ - - 172.30.10.64
+ - 172.30.10.159
meta:
- assign_vip: true
cidr: 172.16.0.0/24
configurable: true
floating_range_var: floating_ranges
@@ -227,16 +151,36 @@ network:
render_addr_mask: public
render_type: null
use_gateway: true
+ vips:
+ - haproxy
+ - vrouter
vlan_start: null
name: public
vlan_start: null
- - cidr: 192.168.0.0/24
+ - cidr: 192.168.2.0/24
gateway: null
ip_ranges:
- - - 192.168.0.1
- - 192.168.0.254
+ - - 192.168.2.2
+ - 192.168.2.254
+ meta:
+ cidr: 192.168.2.0/24
+ configurable: true
+ map_priority: 2
+ name: private
+ notation: cidr
+ render_addr_mask: private
+ render_type: cidr
+ seg_type: gre
+ use_gateway: false
+ vlan_start: 103
+ name: private
+ vlan_start: 302
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.2
+ - 192.168.1.254
meta:
- assign_vip: true
cidr: 192.168.0.0/24
configurable: true
map_priority: 2
@@ -245,16 +189,18 @@ network:
render_addr_mask: internal
render_type: cidr
use_gateway: false
+ vips:
+ - haproxy
+ - vrouter
vlan_start: 101
name: management
- vlan_start: 101
- - cidr: 192.168.1.0/24
+ vlan_start: 300
+ - cidr: 192.168.0.0/24
gateway: null
ip_ranges:
- - - 192.168.1.1
- - 192.168.1.254
+ - - 192.168.0.2
+ - 192.168.0.254
meta:
- assign_vip: false
cidr: 192.168.1.0/24
configurable: true
map_priority: 2
@@ -265,31 +211,13 @@ network:
use_gateway: false
vlan_start: 102
name: storage
- vlan_start: 102
- - cidr: null
- gateway: null
- ip_ranges: []
- meta:
- assign_vip: false
- configurable: false
- map_priority: 2
- name: private
- neutron_vlan_range: true
- notation: null
- render_addr_mask: null
- render_type: null
- seg_type: vlan
- use_gateway: false
- vlan_start: null
- name: private
- vlan_start: null
- - cidr: 10.20.0.0/24
- gateway: null
+ vlan_start: 301
+ - cidr: 10.20.0.0/16
+ gateway: 10.20.0.2
ip_ranges:
- - 10.20.0.3
- 10.20.0.254
meta:
- assign_vip: false
configurable: false
map_priority: 0
notation: ip_ranges
@@ -299,12 +227,17 @@ network:
use_gateway: true
name: fuelweb_admin
vlan_start: null
+ public_vip: 172.30.10.64
+ public_vrouter_vip: 172.30.10.65
settings:
editable:
access:
email:
description: Email address for Administrator
- label: email
+ label: Email
+ regex:
+ error: Invalid email
+ source: ^\S+@\S+$
type: text
value: admin@localhost
weight: 40
@@ -313,25 +246,30 @@ settings:
weight: 10
password:
description: Password for Administrator
- label: password
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
type: password
value: admin
weight: 20
tenant:
description: Tenant (project) name for Administrator
- label: tenant
+ label: Tenant
regex:
error: Invalid tenant name
- source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
type: text
value: admin
weight: 30
user:
description: Username for Administrator
- label: username
+ label: Username
regex:
error: Invalid username
- source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
type: text
value: admin
weight: 10
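
Note the tightened Administrator tenant/username pattern in this hunk: it appends (?!.* +.*$) and swaps the trailing .* for .+, so besides the reserved service names it now also rejects the empty string and any value containing a space. A quick before/after check (patterns copied from the hunk, with the folded YAML line break restored as a space):

import re

BLACKLIST = (r"^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)"
             r"(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)")
OLD = BLACKLIST + r".*"
NEW = BLACKLIST + r"(?!.* +.*$).+"

for name in ("admin", "my tenant", "", "nova"):
    print("%-12r old=%-5s new=%s" % (name,
          bool(re.fullmatch(OLD, name)), bool(re.fullmatch(NEW, name))))
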
@@ -351,6 +289,14 @@ settings:
metadata:
label: Additional Components
weight: 20
+ mongo:
+ description: If selected, you can use an external MongoDB as the Ceilometer backend
+ label: Use external MongoDB
+ restrictions:
+ - settings:additional_components.ceilometer.value == false
+ type: checkbox
+ value: false
+ weight: 40
murano:
description: If selected, Murano component will be installed
label: Install Murano
@@ -369,7 +315,7 @@ settings:
auth_key:
description: Public key(s) to include in authorized_keys on deployed nodes
label: Public Key
- type: text
+ type: textarea
value: ''
weight: 70
auto_assign_floating_ip:
@@ -377,24 +323,11 @@ settings:
to a new instance
label: Auto assign floating IP
restrictions:
- - cluster:net_provider == 'neutron'
+ - action: hide
+ condition: cluster:net_provider == 'neutron'
type: checkbox
value: false
weight: 40
- compute_scheduler_driver:
- label: Scheduler driver
- type: radio
- value: nova.scheduler.filter_scheduler.FilterScheduler
- values:
- - data: nova.scheduler.filter_scheduler.FilterScheduler
- description: Currently the most advanced OpenStack scheduler. See the OpenStack
- documentation for details.
- label: Filter scheduler
- - data: nova.scheduler.simple.SimpleScheduler
- description: This is 'naive' scheduler which tries to find the least loaded
- host
- label: Simple scheduler
- weight: 40
debug:
description: Debug logging mode provides more information, but requires more
disk space.
@@ -402,17 +335,6 @@ settings:
type: checkbox
value: false
weight: 20
- disable_offload:
- description: If set, generic segmentation offload (gso) and generic receive
- offload (gro) on physical nics will be disabled. See ethtool man.
- label: Disable generic offload on physical nics
- restrictions:
- - action: hide
- condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
- == 'gre'
- type: checkbox
- value: true
- weight: 80
libvirt_type:
label: Hypervisor type
type: radio
@@ -421,21 +343,10 @@ settings:
- data: kvm
description: Choose this type of hypervisor if you run OpenStack on hardware.
label: KVM
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- data: qemu
description: Choose this type of hypervisor if you run OpenStack on virtual
hosts.
label: QEMU
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
- - data: vcenter
- description: Choose this type of hypervisor if you run OpenStack in a vCenter
- environment.
- label: vCenter
- restrictions:
- - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
- == 'neutron'
weight: 30
metadata:
label: Common
@@ -447,12 +358,19 @@ settings:
type: checkbox
value: false
weight: 25
+ puppet_debug:
+ description: Debug puppet logging mode provides more information, but requires
+ more disk space.
+ label: Puppet debug logging
+ type: checkbox
+ value: true
+ weight: 20
resume_guests_state_on_host_boot:
description: Whether to resume previous guests state when the host reboots.
If enabled, this option causes guests assigned to the host to resume their
previous state. If the guest was running a restart will be attempted when
- nova-compute starts. If the guest was not running previously, a restart
- will not be attempted.
+ nova-compute starts. If the guest was not running previously, a restart will
+ not be attempted.
label: Resume guests state on host boot
type: checkbox
value: true
@@ -465,6 +383,10 @@ settings:
type: checkbox
value: true
weight: 50
+ use_vcenter:
+ type: hidden
+ value: false
+ weight: 30
corosync:
group:
description: ''
@@ -494,19 +416,74 @@ settings:
dns_list:
description: List of upstream DNS servers, separated by comma
label: DNS list
+ regex:
+ error: Invalid IP address list
+ source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
type: text
- value: 8.8.8.8, 8.8.4.4
+ value: 8.8.4.4, 8.8.8.8
weight: 10
metadata:
- label: Upstream DNS
+ label: Host OS DNS Servers
weight: 90
+ external_mongo:
+ hosts_ip:
+ description: IP addresses of MongoDB hosts, separated by comma
+ label: MongoDB hosts IP
+ regex:
+ error: Invalid hosts IP sequence
+ source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
+ type: text
+ value: ''
+ weight: 30
+ metadata:
+ label: External MongoDB
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.mongo.value == false
+ weight: 20
+ mongo_db_name:
+ description: Mongo database name
+ label: Database name
+ regex:
+ error: Invalid database name
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ mongo_password:
+ description: Mongo database password
+ label: Password
+ regex:
+ error: Password contains spaces
+ source: ^\S*$
+ type: password
+ value: ceilometer
+ weight: 30
+ mongo_replset:
+ description: Name for Mongo replication set
+ label: Replset
+ type: text
+ value: ''
+ weight: 30
+ mongo_user:
+ description: Mongo database username
+ label: Username
+ regex:
+ error: Empty username
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
external_ntp:
metadata:
- label: Upstream NTP
+ label: Host OS NTP Servers
weight: 100
ntp_list:
description: List of upstream NTP servers, separated by comma
- label: NTP servers list
+ label: NTP server list
+ regex:
+ error: Invalid NTP server list
+ source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
type: text
value: 0.pool.ntp.org, 1.pool.ntp.org
weight: 10
@@ -515,15 +492,32 @@ settings:
description: Default kernel parameters
label: Initial parameters
type: text
- value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+ value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
+ nomodeset
weight: 45
metadata:
label: Kernel parameters
weight: 40
+ murano_settings:
+ metadata:
+ label: Murano Settings
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.murano.value == false
+ weight: 20
+ murano_repo_url:
+ description: ''
+ label: Murano Repository URL
+ type: text
+ value: http://storage.apps.openstack.org/
+ weight: 10
neutron_mellanox:
metadata:
enabled: true
label: Mellanox Neutron components
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
toggleable: false
weight: 50
plugin:
@@ -538,9 +532,8 @@ settings:
restrictions:
- settings:storage.iser.value == true
- data: drivers_only
- description: If selected, Mellanox Ethernet drivers will be installed to
- support networking over Mellanox NIC. Mellanox Neutron plugin will not
- be installed.
+ description: If selected, Mellanox Ethernet drivers will be installed to support
+ networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
label: Install only Mellanox drivers
restrictions:
- settings:common.libvirt_type.value != 'kvm'
@@ -561,117 +554,77 @@ settings:
type: text
value: '16'
weight: 70
- nsx_plugin:
- connector_type:
- description: Default network transport type to use
- label: NSX connector type
- type: select
- value: stt
- values:
- - data: gre
- label: GRE
- - data: ipsec_gre
- label: GRE over IPSec
- - data: stt
- label: STT
- - data: ipsec_stt
- label: STT over IPSec
- - data: bridge
- label: Bridge
- weight: 80
- l3_gw_service_uuid:
- description: UUID for the default L3 gateway service to use with this cluster
- label: L3 service UUID
- regex:
- error: Invalid L3 gateway service UUID
- source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
- type: text
- value: ''
- weight: 50
+ opendaylight:
metadata:
- enabled: false
- label: VMware NSX
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
restrictions:
- - action: hide
- condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
- != 'nsx'
- weight: 20
- nsx_controllers:
- description: One or more IPv4[:port] addresses of NSX controller node, separated
- by comma (e.g. 10.30.30.2,192.168.110.254:443)
- label: NSX controller endpoint
- regex:
- error: Invalid controller endpoints, specify valid IPv4[:port] pair
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
- type: text
- value: ''
- weight: 60
- nsx_password:
- description: Password for Administrator
- label: NSX password
- regex:
- error: Empty password
- source: \S
- type: password
- value: ''
- weight: 30
- nsx_username:
- description: NSX administrator's username
- label: NSX username
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
regex:
- error: Empty username
- source: \S
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
type: text
- value: admin
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
weight: 20
- packages_url:
- description: URL to NSX specific packages
- label: URL to NSX bits
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
regex:
- error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
- http://10.20.0.2/nsx)
- source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
type: text
- value: ''
- weight: 70
- replication_mode:
- description: ''
- label: NSX cluster has Service nodes
- type: checkbox
- value: true
- weight: 90
- transport_zone_uuid:
- description: UUID of the pre-existing default NSX Transport zone
- label: Transport zone UUID
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
regex:
- error: Invalid transport zone UUID
- source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
type: text
- value: ''
- weight: 40
+ value: '10'
+ weight: 30
provision:
metadata:
label: Provision
- restrictions:
- - action: hide
- condition: not ('experimental' in version:feature_groups)
weight: 80
method:
description: Which provision method to use for this cluster.
label: Provision method
type: radio
- value: cobbler
+ value: image
values:
- data: image
description: Copying pre-built images onto disk.
label: Image
- data: cobbler
description: Install from scratch using anaconda or debian-installer.
- label: Classic (use anaconda or debian-installer)
+ label: (DEPRECATED) Classic (use anaconda or debian-installer)
public_network_assignment:
assign_to_all_nodes:
- description: When disabled, public network will be assigned to controllers
- and zabbix-server only
+ description: When disabled, public network will be assigned to controllers only
label: Assign public network to all nodes
type: checkbox
value: false
@@ -682,42 +635,118 @@ settings:
- action: hide
condition: cluster:net_provider != 'neutron'
weight: 50
+ repo_setup:
+ metadata:
+ always_editable: true
+ label: Repositories
+ weight: 50
+ repos:
+ description: 'Please note: the first repository will be considered the operating
+ system mirror that will be used during node provisioning.
+
+ To create a local repository mirror on the Fuel master node, please follow
+ the instructions provided by running "fuel-createmirror --help" on the Fuel
+ master node.
+
+ Please make sure your Fuel master node has Internet access to the repository
+ before attempting to create a mirror.
+
+ For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
+
+ '
+ extra_priority: null
+ type: custom_repo_configuration
+ value:
+ - name: ubuntu
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-updates
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-security
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: mos
+ priority: 1050
+ section: main restricted
+ suite: mos6.1
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
+ - name: mos-updates
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-updates
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-security
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-security
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-holdback
+ priority: 1100
+ section: main restricted
+ suite: mos6.1-holdback
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: Auxiliary
+ priority: 1150
+ section: main restricted
+ suite: auxiliary
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
storage:
ephemeral_ceph:
- description: Configures Nova to store ephemeral volumes in RBD. This works
- best if Ceph is enabled for volumes and images, too. Enables live migration
- of all types of Ceph backed VMs (without this option, live migration will
- only work with VMs launched from Cinder volumes).
+ description: Configures Nova to store ephemeral volumes in RBD. This works best
+ if Ceph is enabled for volumes and images, too. Enables live migration of
+ all types of Ceph backed VMs (without this option, live migration will only
+ work with VMs launched from Cinder volumes).
label: Ceph RBD for ephemeral volumes (Nova)
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
type: checkbox
- value: false
+ value: true
weight: 75
images_ceph:
description: Configures Glance to use the Ceph RBD backend to store images.
If enabled, this option will prevent Swift from installing.
label: Ceph RBD for images (Glance)
+ restrictions:
+ - settings:storage.images_vcenter.value == true: Only one Glance backend could
+ be selected.
type: checkbox
- value: false
+ value: true
weight: 30
images_vcenter:
description: Configures Glance to use the vCenter/ESXi backend to store images.
If enabled, this option will prevent Swift from installing.
label: VMWare vCenter/ESXi datastore for images (Glance)
restrictions:
- - settings:common.libvirt_type.value != 'vcenter'
+ - action: hide
+ condition: settings:common.use_vcenter.value != true
+ - condition: settings:storage.images_ceph.value == true
+ message: Only one Glance backend could be selected.
type: checkbox
value: false
weight: 35
iser:
description: 'High performance block storage: Cinder volumes over iSER protocol
- (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
- and will use a dedicated virtual function for the storage network.'
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
+ will use a dedicated virtual function for the storage network.'
label: iSER protocol for volumes (Cinder)
restrictions:
- settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
!= 'kvm'
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
type: checkbox
value: false
weight: 11
@@ -735,123 +764,31 @@ settings:
weight: 80
osd_pool_size:
description: Configures the default number of object replicas in Ceph. This
- number must be equal to or lower than the number of deployed 'Storage -
- Ceph OSD' nodes.
+ number must be equal to or lower than the number of deployed 'Storage - Ceph
+ OSD' nodes.
label: Ceph object replication factor
regex:
error: Invalid number
source: ^[1-9]\d*$
- restrictions:
- - settings:common.libvirt_type.value == 'vcenter'
type: text
value: '2'
weight: 85
- vc_datacenter:
- description: Inventory path to a datacenter. If you want to use ESXi host
- as datastore, it should be "ha-datacenter".
- label: Datacenter name
- regex:
- error: Empty datacenter
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 65
- vc_datastore:
- description: Datastore associated with the datacenter.
- label: Datastore name
- regex:
- error: Empty datastore
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 60
- vc_host:
- description: IP Address of vCenter/ESXi
- label: vCenter/ESXi IP
- regex:
- error: Specify valid IPv4 address
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 45
- vc_image_dir:
- description: The name of the directory where the glance images will be stored
- in the VMware datastore.
- label: Datastore Images directory
- regex:
- error: Empty images directory
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: /openstack_glance
- weight: 70
- vc_password:
- description: vCenter/ESXi admin password
- label: Password
- regex:
- error: Empty password
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: password
- value: ''
- weight: 55
- vc_user:
- description: vCenter/ESXi admin username
- label: Username
- regex:
- error: Empty username
- source: \S
- restrictions:
- - action: hide
- condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
- != 'vcenter'
- type: text
- value: ''
- weight: 50
volumes_ceph:
description: Configures Cinder to store volumes in Ceph RBD images.
label: Ceph RBD for volumes (Cinder)
restrictions:
- - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
- == 'vcenter'
+ - settings:storage.volumes_lvm.value == true
type: checkbox
- value: false
+ value: true
weight: 20
volumes_lvm:
- description: Requires at least one Storage - Cinder LVM node.
+ description: It is recommended to have at least one Storage - Cinder LVM node.
label: Cinder LVM over iSCSI for volumes
restrictions:
- settings:storage.volumes_ceph.value == true
type: checkbox
- value: true
- weight: 10
- volumes_vmdk:
- description: Configures Cinder to store volumes via VMware vCenter.
- label: VMware vCenter for volumes (Cinder)
- restrictions:
- - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
- == true
- type: checkbox
value: false
- weight: 15
+ weight: 10
syslog:
metadata:
label: Syslog
@@ -883,94 +820,22 @@ settings:
description: ''
label: TCP
weight: 30
- vcenter:
- cluster:
- description: vCenter cluster name. If you have multiple clusters, use comma
- to separate names
- label: Cluster
- regex:
- error: Invalid cluster list
- source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
- type: text
- value: ''
- weight: 40
- datastore_regex:
- description: The Datastore regexp setting specifies the data stores to use
- with Compute. For example, "nas.*". If you want to use all available datastores,
- leave this field blank
- label: Datastore regexp
- regex:
- error: Invalid datastore regexp
- source: ^(\S.*\S|\S|)$
- type: text
- value: ''
- weight: 50
- host_ip:
- description: IP Address of vCenter
- label: vCenter IP
- regex:
- error: Specify valid IPv4 address
- source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
- type: text
- value: ''
- weight: 10
- metadata:
- label: vCenter
- restrictions:
- - action: hide
- condition: settings:common.libvirt_type.value != 'vcenter'
- weight: 20
- use_vcenter:
- description: ''
- label: ''
+ workloads_collector:
+ enabled:
type: hidden
value: true
- weight: 5
- vc_password:
- description: vCenter admin password
- label: Password
- regex:
- error: Empty password
- source: \S
- type: password
- value: admin
- weight: 30
- vc_user:
- description: vCenter admin username
- label: Username
- regex:
- error: Empty username
- source: \S
- type: text
- value: admin
- weight: 20
- vlan_interface:
- description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
- vmnic1). If empty "vmnic0" is used by default
- label: ESXi VLAN interface
- restrictions:
- - action: hide
- condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
- != 'VlanManager'
- type: text
- value: ''
- weight: 60
- zabbix:
metadata:
- label: Zabbix Access
+ label: Workloads Collector User
restrictions:
- action: hide
- condition: not ('experimental' in version:feature_groups)
- weight: 70
+ condition: 'true'
+ weight: 10
password:
- description: Password for Zabbix Administrator
- label: password
type: password
- value: zabbix
- weight: 20
- username:
- description: Username for Zabbix Administrator
- label: username
+ value: pBkLbu1k
+ tenant:
type: text
- value: admin
- weight: 10
+ value: services
+ user:
+ type: text
+ value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml
new file mode 100644
index 000000000..cfc97094c
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version:
+created:
+comment: Config for LF POD2 and OpenDaylight
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+ pxeMac: 00:25:b5:a0:00:2a
+ ipmiIp: 172.30.8.75
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 2
+ pxeMac: 00:25:b5:a0:00:3a
+ ipmiIp: 172.30.8.65
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 3
+ pxeMac: 00:25:b5:a0:00:4a
+ ipmiIp: 172.30.8.74
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 4
+ pxeMac: 00:25:b5:a0:00:5a
+ ipmiIp: 172.30.8.73
+ ipmiUser: admin
+ ipmiPass: octopus
+- id: 5
+ pxeMac: 00:25:b5:a0:00:6a
+ ipmiIp: 172.30.8.72
+ ipmiUser: admin
+ ipmiPass: octopus
+# The Fuel node is added as node id 6, which may not be correct - please
+# adjust it as needed.
+- id: 6
+ libvirtName: fuel-opnfv
+ libvirtTemplate: templates/hardware_environment/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 50G \ No newline at end of file
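Before starting a deployment against this adapter, it can save time to confirm that each BMC answers with the credentials listed above. A minimal sketch, assuming ipmitool is installed and using a hypothetical dha.yaml path:

    import subprocess
    import yaml

    with open('dha.yaml') as f:  # hypothetical path to this DHA file
        dha = yaml.safe_load(f)

    for node in dha['nodes']:
        if node.get('isFuel'):
            continue  # the Fuel node is a libvirt VM, not an IPMI target
        cmd = ['ipmitool', '-I', 'lanplus',
               '-H', node['ipmiIp'],
               '-U', node['ipmiUser'],
               '-P', node['ipmiPass'],
               'chassis', 'status']
        rc = subprocess.call(cmd)
        print 'node %s: %s' % (node['id'], 'OK' if rc == 0 else 'UNREACHABLE')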
diff --git a/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml
new file mode 100644
index 000000000..0895e4f1d
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dea.yaml
@@ -0,0 +1,842 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version:
+created:
+comment: Config for OPNFV BOX - HA deployment with Ceph
+environment:
+ name: opnfv_virt
+ mode: ha
+ net_segment_type: gre
+wanted_release: Juno on Ubuntu 14.04.1
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 5
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+opnfv:
+ hosts:
+ - name:
+ address:
+ fqdn:
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 8.8.8.8
+ DNS_DOMAIN: domain.tld
+ DNS_SEARCH: domain.tld
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv
+ NTP1: 0.pool.ntp.org
+ NTP2: 1.pool.ntp.org
+ NTP3: 2.pool.ntp.org
+interfaces_1:
+ eth0:
+ - public
+ eth1:
+ - fuelweb_admin
+ - management
+ - storage
+ - private
+transformations_1:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-floating
+ provider: ovs
+ - action: add-patch
+ bridges:
+ - br-floating
+ - br-ex
+ mtu: 65000
+ provider: ovs
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth1.300
+ - action: add-port
+ bridge: br-storage
+ name: eth1.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth1.302
+ - action: add-port
+ bridge: br-ex
+ name: eth0
+transformations_2:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth1.300
+ - action: add-port
+ bridge: br-storage
+ name: eth1.301
+ - action: add-port
+ bridge: br-mesh
+ name: eth1.302
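The transformations above encode each node's bridge and VLAN layout. A small sketch that prints the resulting bridge-to-port mapping for a quick sanity check, again assuming a hypothetical dea.yaml path:

    import yaml

    with open('dea.yaml') as f:  # hypothetical path to this DEA file
        dea = yaml.safe_load(f)

    # Print the bridge -> port mapping encoded by each transformations block.
    for block in ('transformations_1', 'transformations_2'):
        print block
        for t in dea[block]['transformations']:
            if t['action'] == 'add-port':
                print '  %-12s <- %s' % (t['bridge'], t['name'])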
+network:
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 8.8.4.4
+ - 8.8.8.8
+ floating_ranges:
+ - - 172.30.10.83
+ - 172.30.10.92
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: gre
+ vlan_range:
+ - 1000
+ - 1030
+ networks:
+ - cidr: 172.30.10.0/24
+ gateway: 172.30.10.1
+ ip_ranges:
+ - - 172.30.10.73
+ - 172.30.10.82
+ meta:
+ cidr: 172.30.10.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.30.10.73
+ - 172.30.10.82
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.1
+ - 192.168.0.254
+ meta:
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: 300
+ name: management
+ vlan_start: 300
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.1
+ - 192.168.1.254
+ meta:
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 301
+ name: storage
+ vlan_start: 301
+ - cidr: 192.168.2.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.2.1
+ - 192.168.2.254
+ meta:
+ assign_vip: 192.168.2.0/24
+ configurable: true
+ map_priority: 2
+ name: private
+ notation: cidr
+ render_addr_mask: private
+ render_type: cidr
+ seg_type: gre
+ use_gateway: false
+ vlan_start: 302
+ name: private
+ vlan_start: 302
+ - cidr: 10.20.0.0/24
+ gateway: 10.20.0.2
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.255.254
+ meta:
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: Email
+ regex:
+ error: Invalid email
+ source: ^\S+@\S+$
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: Tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: Username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ mongo:
+      description: If selected, you can use an external MongoDB as the Ceilometer backend
+ label: Use external Mongo DB
+ restrictions:
+ - settings:additional_components.ceilometer.value == false
+ type: checkbox
+ value: false
+ weight: 40
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: textarea
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: kvm
+ values:
+ - data: kvm
+ description: Choose this type of hypervisor if you run OpenStack on hardware
+ label: KVM
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ puppet_debug:
+ description: Debug puppet logging mode provides more information, but requires
+ more disk space.
+ label: Puppet debug logging
+ type: checkbox
+ value: true
+ weight: 20
+ resume_guests_state_on_host_boot:
+      description: Whether to resume the previous guest state when the host reboots.
+        If enabled, this option causes guests assigned to the host to resume their
+        previous state. If a guest was running, a restart will be attempted when
+        nova-compute starts; if it was not running previously, a restart will not
+        be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ use_vcenter:
+ type: hidden
+ value: false
+ weight: 30
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+      description: Set to true only if multicast is configured correctly on the router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+      description: List of upstream DNS servers, separated by commas
+ label: DNS list
+ regex:
+ error: Invalid IP address list
+ source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
+ type: text
+ value: 8.8.4.4, 8.8.8.8
+ weight: 10
+ metadata:
+ label: Host OS DNS Servers
+ weight: 90
+ external_mongo:
+ hosts_ip:
+      description: IP addresses of MongoDB hosts, separated by commas
+ label: MongoDB hosts IP
+ regex:
+ error: Invalid hosts ip sequence
+ source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
+ type: text
+ value: ''
+ weight: 30
+ metadata:
+ label: External MongoDB
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.mongo.value == false
+ weight: 20
+ mongo_db_name:
+ description: Mongo database name
+ label: Database name
+ regex:
+ error: Invalid database name
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ mongo_password:
+ description: Mongo database password
+ label: Password
+ regex:
+ error: Password contains spaces
+ source: ^\S*$
+ type: password
+ value: ceilometer
+ weight: 30
+ mongo_replset:
+ description: Name for Mongo replication set
+ label: Replset
+ type: text
+ value: ''
+ weight: 30
+ mongo_user:
+ description: Mongo database username
+ label: Username
+ regex:
+ error: Empty username
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ external_ntp:
+ metadata:
+ label: Host OS NTP Servers
+ weight: 100
+ ntp_list:
+      description: List of upstream NTP servers, separated by commas
+ label: NTP server list
+ regex:
+ error: Invalid NTP server list
+ source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
+ type: text
+ value: 0.pool.ntp.org, 1.pool.ntp.org
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
+ nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ murano_settings:
+ metadata:
+ label: Murano Settings
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.murano.value == false
+ weight: 20
+ murano_repo_url:
+ description: ''
+ label: Murano Repository URL
+ type: text
+ value: http://storage.apps.openstack.org/
+ weight: 10
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to support
+ networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+      description: Note that one virtual function will be reserved for the storage
+        network if iSER is chosen.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+      description: Configures Neutron to use VXLAN tunneling
+      label: Use VXLAN
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
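Once the environment is deployed, the OpenDaylight controller should answer on the REST port configured above. A quick TCP probe, using a hypothetical controller address (substitute the management VIP of your deployment):

    import socket

    ODL_HOST = '192.168.0.2'  # hypothetical: replace with the controller VIP
    ODL_PORT = 8282           # rest_api_port configured above

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(5)
    try:
        sock.connect((ODL_HOST, ODL_PORT))
        print 'ODL REST API port is open'
    except socket.error as exc:
        print 'ODL REST API port is closed: %s' % exc
    finally:
        sock.close()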
+ provision:
+ metadata:
+ label: Provision
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: image
+ values:
+ - data: image
+        description: Copies pre-built images onto a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: (DEPRECATED) Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+      description: When disabled, the public network will be assigned to controllers only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ repo_setup:
+ metadata:
+ always_editable: true
+ label: Repositories
+ weight: 50
+ repos:
+ description: 'Please note: the first repository will be considered the operating
+ system mirror that will be used during node provisioning.
+
+ To create a local repository mirror on the Fuel master node, please follow
+ the instructions provided by running "fuel-createmirror --help" on the Fuel
+ master node.
+
+ Please make sure your Fuel master node has Internet access to the repository
+ before attempting to create a mirror.
+
+ For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
+
+ '
+ extra_priority: null
+ type: custom_repo_configuration
+ value:
+ - name: ubuntu
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-updates
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-security
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: mos
+ priority: 1050
+ section: main restricted
+ suite: mos6.1
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
+ - name: mos-updates
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-updates
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-security
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-security
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-holdback
+ priority: 1100
+ section: main restricted
+ suite: mos6.1-holdback
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: Auxiliary
+ priority: 1150
+ section: main restricted
+ suite: auxiliary
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works best
+ if Ceph is enabled for volumes and images, too. Enables live migration of
+ all types of Ceph backed VMs (without this option, live migration will only
+ work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ restrictions:
+      - settings:storage.images_vcenter.value == true: Only one Glance backend can
+          be selected.
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - action: hide
+ condition: settings:common.use_vcenter.value != true
+ - condition: settings:storage.images_ceph.value == true
+        message: Only one Glance backend can be selected.
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
+ will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage - Ceph
+ OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ type: text
+ value: '2'
+ weight: 85
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: It is recommended to have at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ workloads_collector:
+ enabled:
+ type: hidden
+ value: true
+ metadata:
+ label: Workloads Collector User
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 10
+ password:
+ type: password
+ value: pBkLbu1k
+ tenant:
+ type: text
+ value: services
+ user:
+ type: text
+ value: fuel_stats_user
diff --git a/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml
new file mode 100644
index 000000000..c2624f2ba
--- /dev/null
+++ b/fuel/deploy/templates/hardware_environment/conf/opnfv_box/dha.yaml
@@ -0,0 +1,49 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version:
+created:
+comment: Config for OPNFV BOX
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+ pxeMac: b8:ae:ed:76:4d:a4
+ ipmiIp: <ipmi_ip>
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 2
+ pxeMac: b8:ae:ed:76:4d:94
+ ipmiIp: <ipmi_ip>
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 3
+ pxeMac: b8:ae:ed:76:4c:eb
+ ipmiIp: <ipmi_ip>
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 4
+ pxeMac: b8:ae:ed:76:37:62
+ ipmiIp: <ipmi_ip>
+ ipmiUser: <username>
+ ipmiPass: <password>
+- id: 5
+ pxeMac: b8:ae:ed:76:4d:95
+ ipmiIp: <ipmi_ip>
+ ipmiUser: <username>
+ ipmiPass: <password>
+# The Fuel node is added as node id 6, which may not be correct - please
+# adjust it as needed.
+- id: 6
+ libvirtName: fuel-opnfv
+ libvirtTemplate: templates/hardware_environment/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+disks:
+ fuel: 50G \ No newline at end of file
diff --git a/fuel/deploy/baremetal/vm/vFuel b/fuel/deploy/templates/hardware_environment/vms/fuel.xml
index 1b4f4eb47..e3e3f80bb 100644
--- a/fuel/deploy/baremetal/vm/vFuel
+++ b/fuel/deploy/templates/hardware_environment/vms/fuel.xml
@@ -1,15 +1,15 @@
-<domain type='kvm'>
- <name>vFuel</name>
+<domain type='kvm' id='62'>
+ <name>fuel</name>
<memory unit='KiB'>8290304</memory>
<currentMemory unit='KiB'>8290304</currentMemory>
- <vcpu placement='static'>2</vcpu>
+ <vcpu placement='static'>4</vcpu>
<resource>
<partition>/machine</partition>
</resource>
<os>
- <type arch='x86_64' machine='pc-i440fx-utopic'>hvm</type>
- <boot dev='hd'/>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
<boot dev='cdrom'/>
+ <boot dev='hd'/>
<bootmenu enable='no'/>
</os>
<features>
@@ -33,15 +33,14 @@
<suspend-to-disk enabled='no'/>
</pm>
<devices>
- <emulator>/usr/bin/kvm</emulator>
+ <emulator>/usr/libexec/qemu-kvm</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='raw'/>
- <source file='/mnt/images/vFuel.raw'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block' device='cdrom'>
<driver name='qemu' type='raw'/>
- <target dev='hda' bus='ide'/>
+ <target dev='hdb' bus='ide'/>
<readonly/>
</disk>
<controller type='usb' index='0' model='ich9-ehci1'>
@@ -55,19 +54,21 @@
<controller type='usb' index='0' model='ich9-uhci3'>
<master startport='4'/>
</controller>
- <controller type='pci' index='0' model='pci-root'/>
- <controller type='virtio-serial' index='0'>
+ <controller type='pci' index='0' model='pci-root'>
</controller>
<controller type='ide' index='0'>
</controller>
+ <controller type='virtio-serial' index='0'>
+ </controller>
<interface type='bridge'>
- <source bridge='vfuelnet'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
+ <source path='/dev/pts/0'/>
<target port='0'/>
</serial>
- <console type='pty'>
+ <console type='pty' tty='/dev/pts/0'>
+ <source path='/dev/pts/0'/>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
@@ -83,5 +84,8 @@
<memballoon model='virtio'>
</memballoon>
</devices>
- <seclabel type='dynamic' model='apparmor' relabel='yes'/>
-</domain>
+ <seclabel type='dynamic' model='selinux' relabel='yes'>
+ <label>system_u:system_r:svirt_t:s0:c52,c932</label>
+ <imagelabel>system_u:object_r:svirt_image_t:s0:c52,c932</imagelabel>
+ </seclabel>
+</domain> \ No newline at end of file
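The template above is no longer a complete, startable domain on its own - the deploy scripts fill in the disk source and network bridge before use. A minimal sketch of defining the rendered XML through the libvirt Python bindings, under that assumption:

    import libvirt  # requires the libvirt-python bindings

    with open('templates/hardware_environment/vms/fuel.xml') as f:
        xml = f.read()  # assumed fully rendered (disk source, bridge name)

    conn = libvirt.open('qemu:///system')
    try:
        dom = conn.defineXML(xml)  # define only; start later with dom.create()
        print 'defined domain %s' % dom.name()
    finally:
        conn.close()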
diff --git a/fuel/deploy/templates/virtual_environment/conf/dea.yaml b/fuel/deploy/templates/virtual_environment/conf/dea.yaml
new file mode 100644
index 000000000..bc9a1f931
--- /dev/null
+++ b/fuel/deploy/templates/virtual_environment/conf/dea.yaml
@@ -0,0 +1,838 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version:
+created:
+comment: Config for Virtual Environment - HA deployment with Ceph and OpenDaylight
+environment:
+ name: opnfv_virt
+ mode: ha
+ net_segment_type: gre
+wanted_release: Juno on Ubuntu 14.04.1
+nodes:
+- id: 1
+ interfaces: interfaces_1
+ transformations: transformations_1
+ role: ceph-osd,controller
+- id: 2
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 3
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+- id: 4
+ interfaces: interfaces_1
+ transformations: transformations_2
+ role: ceph-osd,compute
+fuel:
+ ADMIN_NETWORK:
+ ipaddress: 10.20.0.2
+ netmask: 255.255.0.0
+ dhcp_pool_start: 10.20.0.3
+ dhcp_pool_end: 10.20.0.254
+ DNS_UPSTREAM: 10.118.32.193
+ DNS_DOMAIN: opnfvericsson.ca
+ DNS_SEARCH: opnfvericsson.ca
+ FUEL_ACCESS:
+ user: admin
+ password: admin
+ HOSTNAME: opnfv_virt
+ NTP1: 10.118.34.219
+ NTP2:
+ NTP3:
+interfaces_1:
+ eth0:
+ - fuelweb_admin
+ - management
+ eth1:
+ - storage
+ eth2:
+ - private
+ eth3:
+ - public
+transformations_1:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-ex
+ - action: add-br
+ name: br-floating
+ provider: ovs
+ - action: add-patch
+ bridges:
+ - br-floating
+ - br-ex
+ mtu: 65000
+ provider: ovs
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth0.101
+ - action: add-port
+ bridge: br-storage
+ name: eth1.102
+ - action: add-port
+ bridge: br-mesh
+ name: eth2.103
+ - action: add-port
+ bridge: br-ex
+ name: eth3
+transformations_2:
+ transformations:
+ - action: add-br
+ name: br-fw-admin
+ - action: add-br
+ name: br-mgmt
+ - action: add-br
+ name: br-storage
+ - action: add-br
+ name: br-mesh
+ - action: add-port
+ bridge: br-fw-admin
+ name: eth0
+ - action: add-port
+ bridge: br-mgmt
+ name: eth0.101
+ - action: add-port
+ bridge: br-storage
+ name: eth1.102
+ - action: add-port
+ bridge: br-mesh
+ name: eth2.103
+network:
+ management_vip: 192.168.0.2
+ management_vrouter_vip: 192.168.0.3
+ networking_parameters:
+ base_mac: fa:16:3e:00:00:00
+ dns_nameservers:
+ - 10.118.32.193
+ floating_ranges:
+ - - 172.16.0.130
+ - 172.16.0.254
+ gre_id_range:
+ - 2
+ - 65535
+ internal_cidr: 192.168.111.0/24
+ internal_gateway: 192.168.111.1
+ net_l23_provider: ovs
+ segmentation_type: gre
+ vlan_range:
+ - 1000
+ - 1030
+ networks:
+ - cidr: 172.16.0.0/24
+ gateway: 172.16.0.1
+ ip_ranges:
+ - - 172.16.0.2
+ - 172.16.0.126
+ meta:
+ cidr: 172.16.0.0/24
+ configurable: true
+ floating_range_var: floating_ranges
+ ip_range:
+ - 172.16.0.2
+ - 172.16.0.126
+ map_priority: 1
+ name: public
+ notation: ip_ranges
+ render_addr_mask: public
+ render_type: null
+ use_gateway: true
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: null
+ name: public
+ vlan_start: null
+ - cidr: 192.168.0.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.0.2
+ - 192.168.0.254
+ meta:
+ cidr: 192.168.0.0/24
+ configurable: true
+ map_priority: 2
+ name: management
+ notation: cidr
+ render_addr_mask: internal
+ render_type: cidr
+ use_gateway: false
+ vips:
+ - haproxy
+ - vrouter
+ vlan_start: 101
+ name: management
+ vlan_start: 101
+ - cidr: 192.168.1.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.1.1
+ - 192.168.1.254
+ meta:
+ cidr: 192.168.1.0/24
+ configurable: true
+ map_priority: 2
+ name: storage
+ notation: cidr
+ render_addr_mask: storage
+ render_type: cidr
+ use_gateway: false
+ vlan_start: 102
+ name: storage
+ vlan_start: 102
+ - cidr: 192.168.2.0/24
+ gateway: null
+ ip_ranges:
+ - - 192.168.2.2
+ - 192.168.2.254
+ meta:
+ cidr: 192.168.2.0/24
+ configurable: true
+ map_priority: 2
+ name: private
+ notation: cidr
+ render_addr_mask: private
+ render_type: cidr
+ seg_type: gre
+ use_gateway: false
+ vlan_start: 103
+ name: private
+ vlan_start: 103
+ - cidr: 10.20.0.0/16
+ gateway: 10.20.0.2
+ ip_ranges:
+ - - 10.20.0.3
+ - 10.20.0.254
+ meta:
+ configurable: false
+ map_priority: 0
+ notation: ip_ranges
+ render_addr_mask: null
+ render_type: null
+ unmovable: true
+ use_gateway: true
+ name: fuelweb_admin
+ vlan_start: null
+ public_vip: 172.16.0.2
+ public_vrouter_vip: 172.16.0.3
+settings:
+ editable:
+ access:
+ email:
+ description: Email address for Administrator
+ label: Email
+ regex:
+ error: Invalid email
+ source: ^\S+@\S+$
+ type: text
+ value: admin@localhost
+ weight: 40
+ metadata:
+ label: Access
+ weight: 10
+ password:
+ description: Password for Administrator
+ label: Password
+ regex:
+ error: Empty password
+ source: \S
+ type: password
+ value: admin
+ weight: 20
+ tenant:
+ description: Tenant (project) name for Administrator
+ label: Tenant
+ regex:
+ error: Invalid tenant name
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 30
+ user:
+ description: Username for Administrator
+ label: Username
+ regex:
+ error: Invalid username
+ source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$)(?!.*
+ +.*$).+
+ type: text
+ value: admin
+ weight: 10
+ additional_components:
+ ceilometer:
+ description: If selected, Ceilometer component will be installed
+ label: Install Ceilometer
+ type: checkbox
+ value: false
+ weight: 40
+ heat:
+ description: ''
+ label: ''
+ type: hidden
+ value: true
+ weight: 30
+ metadata:
+ label: Additional Components
+ weight: 20
+ mongo:
+      description: If selected, you can use an external MongoDB as the Ceilometer backend
+ label: Use external Mongo DB
+ restrictions:
+ - settings:additional_components.ceilometer.value == false
+ type: checkbox
+ value: false
+ weight: 40
+ murano:
+ description: If selected, Murano component will be installed
+ label: Install Murano
+ restrictions:
+ - cluster:net_provider != 'neutron'
+ type: checkbox
+ value: false
+ weight: 20
+ sahara:
+ description: If selected, Sahara component will be installed
+ label: Install Sahara
+ type: checkbox
+ value: false
+ weight: 10
+ common:
+ auth_key:
+ description: Public key(s) to include in authorized_keys on deployed nodes
+ label: Public Key
+ type: textarea
+ value: ''
+ weight: 70
+ auto_assign_floating_ip:
+ description: If selected, OpenStack will automatically assign a floating IP
+ to a new instance
+ label: Auto assign floating IP
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider == 'neutron'
+ type: checkbox
+ value: false
+ weight: 40
+ debug:
+ description: Debug logging mode provides more information, but requires more
+ disk space.
+ label: OpenStack debug logging
+ type: checkbox
+ value: false
+ weight: 20
+ libvirt_type:
+ label: Hypervisor type
+ type: radio
+ value: qemu
+ values:
+ - data: kvm
+        description: Choose this type of hypervisor if you run OpenStack on hardware.
+ label: KVM
+ - data: qemu
+ description: Choose this type of hypervisor if you run OpenStack on virtual
+ hosts.
+ label: QEMU
+ weight: 30
+ metadata:
+ label: Common
+ weight: 30
+ nova_quota:
+ description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+ quotas will increase load on the Nova database.
+ label: Nova quotas
+ type: checkbox
+ value: false
+ weight: 25
+ puppet_debug:
+ description: Debug puppet logging mode provides more information, but requires
+ more disk space.
+ label: Puppet debug logging
+ type: checkbox
+ value: true
+ weight: 20
+ resume_guests_state_on_host_boot:
+      description: Whether to resume the previous guest state when the host reboots.
+        If enabled, this option causes guests assigned to the host to resume their
+        previous state. If a guest was running, a restart will be attempted when
+        nova-compute starts; if it was not running previously, a restart will not
+        be attempted.
+ label: Resume guests state on host boot
+ type: checkbox
+ value: true
+ weight: 60
+ use_cow_images:
+ description: For most cases you will want qcow format. If it's disabled, raw
+ image format will be used to run VMs. OpenStack with raw format currently
+ does not support snapshotting.
+ label: Use qcow format for images
+ type: checkbox
+ value: true
+ weight: 50
+ use_vcenter:
+ type: hidden
+ value: false
+ weight: 30
+ corosync:
+ group:
+ description: ''
+ label: Group
+ type: text
+ value: 226.94.1.1
+ weight: 10
+ metadata:
+ label: Corosync
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 50
+ port:
+ description: ''
+ label: Port
+ type: text
+ value: '12000'
+ weight: 20
+ verified:
+      description: Set to true only if multicast is configured correctly on the router.
+ label: Need to pass network verification.
+ type: checkbox
+ value: false
+ weight: 10
+ external_dns:
+ dns_list:
+      description: List of upstream DNS servers, separated by commas
+ label: DNS list
+ regex:
+ error: Invalid IP address list
+ source: ^\*$|^(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3}(?:\s*,\s*(?:\d|1?\d\d|2[0-4]\d|25[0-5])(?:\.(?:\d|1?\d\d|2[0-4]\d|25[0-5])){3})*$
+ type: text
+ value: 10.118.32.193
+ weight: 10
+ metadata:
+ label: Host OS DNS Servers
+ weight: 90
+ external_mongo:
+ hosts_ip:
+      description: IP addresses of MongoDB hosts, separated by commas
+ label: MongoDB hosts IP
+ regex:
+ error: Invalid hosts ip sequence
+ source: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?),)*((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$
+ type: text
+ value: ''
+ weight: 30
+ metadata:
+ label: External MongoDB
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.mongo.value == false
+ weight: 20
+ mongo_db_name:
+ description: Mongo database name
+ label: Database name
+ regex:
+ error: Invalid database name
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ mongo_password:
+ description: Mongo database password
+ label: Password
+ regex:
+ error: Password contains spaces
+ source: ^\S*$
+ type: password
+ value: ceilometer
+ weight: 30
+ mongo_replset:
+ description: Name for Mongo replication set
+ label: Replset
+ type: text
+ value: ''
+ weight: 30
+ mongo_user:
+ description: Mongo database username
+ label: Username
+ regex:
+ error: Empty username
+ source: ^\w+$
+ type: text
+ value: ceilometer
+ weight: 30
+ external_ntp:
+ metadata:
+ label: Host OS NTP Servers
+ weight: 100
+ ntp_list:
+      description: List of upstream NTP servers, separated by commas
+ label: NTP server list
+ regex:
+ error: Invalid NTP server list
+ source: ^\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(?:\.\d{1,3}){3})\s*(?:,\s*(?:(?:\w+(?:-+\w+)*\.)+[a-z]+|\d{1,3}(\.\d{1,3}){3})\s*)*$
+ type: text
+ value: 10.118.34.219
+ weight: 10
+ kernel_params:
+ kernel:
+ description: Default kernel parameters
+ label: Initial parameters
+ type: text
+ value: console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90
+ nomodeset
+ weight: 45
+ metadata:
+ label: Kernel parameters
+ weight: 40
+ murano_settings:
+ metadata:
+ label: Murano Settings
+ restrictions:
+ - action: hide
+ condition: settings:additional_components.murano.value == false
+ weight: 20
+ murano_repo_url:
+ description: ''
+ label: Murano Repository URL
+ type: text
+ value: http://storage.apps.openstack.org/
+ weight: 10
+ neutron_mellanox:
+ metadata:
+ enabled: true
+ label: Mellanox Neutron components
+ restrictions:
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ toggleable: false
+ weight: 50
+ plugin:
+ label: Mellanox drivers and SR-IOV plugin
+ type: radio
+ value: disabled
+ values:
+ - data: disabled
+ description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+ not be installed.
+ label: Mellanox drivers and plugins disabled
+ restrictions:
+ - settings:storage.iser.value == true
+ - data: drivers_only
+ description: If selected, Mellanox Ethernet drivers will be installed to support
+ networking over Mellanox NIC. Mellanox Neutron plugin will not be installed.
+ label: Install only Mellanox drivers
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm'
+ - data: ethernet
+ description: If selected, both Mellanox Ethernet drivers and Mellanox network
+ acceleration (Neutron) plugin will be installed.
+ label: Install Mellanox drivers and SR-IOV plugin
+ restrictions:
+ - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+ == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+ weight: 60
+ vf_num:
+      description: Note that one virtual function will be reserved for the storage
+        network if iSER is chosen.
+ label: Number of virtual NICs
+ restrictions:
+ - settings:neutron_mellanox.plugin.value != 'ethernet'
+ type: text
+ value: '16'
+ weight: 70
+ opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+      description: Configures Neutron to use VXLAN tunneling
+      label: Use VXLAN
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
+ provision:
+ metadata:
+ label: Provision
+ weight: 80
+ method:
+ description: Which provision method to use for this cluster.
+ label: Provision method
+ type: radio
+ value: image
+ values:
+ - data: image
+        description: Copies pre-built images onto a disk.
+ label: Image
+ - data: cobbler
+ description: Install from scratch using anaconda or debian-installer.
+ label: (DEPRECATED) Classic (use anaconda or debian-installer)
+ public_network_assignment:
+ assign_to_all_nodes:
+      description: When disabled, the public network will be assigned to controllers only
+ label: Assign public network to all nodes
+ type: checkbox
+ value: false
+ weight: 10
+ metadata:
+ label: Public network assignment
+ restrictions:
+ - action: hide
+ condition: cluster:net_provider != 'neutron'
+ weight: 50
+ repo_setup:
+ metadata:
+ always_editable: true
+ label: Repositories
+ weight: 50
+ repos:
+ description: 'Please note: the first repository will be considered the operating
+ system mirror that will be used during node provisioning.
+
+ To create a local repository mirror on the Fuel master node, please follow
+ the instructions provided by running "fuel-createmirror --help" on the Fuel
+ master node.
+
+ Please make sure your Fuel master node has Internet access to the repository
+ before attempting to create a mirror.
+
+ For more details, please refer to the documentation (https://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#external-ubuntu-ops).
+
+ '
+ extra_priority: null
+ type: custom_repo_configuration
+ value:
+ - name: ubuntu
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-updates
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: ubuntu-security
+ priority: null
+ section: main
+ suite: trusty
+ type: deb
+ uri: http://10.20.0.2:8080/ubuntu-part
+ - name: mos
+ priority: 1050
+ section: main restricted
+ suite: mos6.1
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/x86_64
+ - name: mos-updates
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-updates
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-security
+ priority: 1050
+ section: main restricted
+ suite: mos6.1-security
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: mos-holdback
+ priority: 1100
+ section: main restricted
+ suite: mos6.1-holdback
+ type: deb
+ uri: http://10.20.0.2:8080/mos-ubuntu
+ - name: Auxiliary
+ priority: 1150
+ section: main restricted
+ suite: auxiliary
+ type: deb
+ uri: http://10.20.0.2:8080/2014.2.2-6.1/ubuntu/auxiliary
+ storage:
+ ephemeral_ceph:
+ description: Configures Nova to store ephemeral volumes in RBD. This works best
+ if Ceph is enabled for volumes and images, too. Enables live migration of
+ all types of Ceph backed VMs (without this option, live migration will only
+ work with VMs launched from Cinder volumes).
+ label: Ceph RBD for ephemeral volumes (Nova)
+ type: checkbox
+ value: true
+ weight: 75
+ images_ceph:
+ description: Configures Glance to use the Ceph RBD backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: Ceph RBD for images (Glance)
+ restrictions:
+      - settings:storage.images_vcenter.value == true: Only one Glance backend can
+          be selected.
+ type: checkbox
+ value: true
+ weight: 30
+ images_vcenter:
+ description: Configures Glance to use the vCenter/ESXi backend to store images.
+ If enabled, this option will prevent Swift from installing.
+ label: VMWare vCenter/ESXi datastore for images (Glance)
+ restrictions:
+ - action: hide
+ condition: settings:common.use_vcenter.value != true
+ - condition: settings:storage.images_ceph.value == true
+        message: Only one Glance backend can be selected.
+ type: checkbox
+ value: false
+ weight: 35
+ iser:
+ description: 'High performance block storage: Cinder volumes over iSER protocol
+ (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, and
+ will use a dedicated virtual function for the storage network.'
+ label: iSER protocol for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+ != 'kvm'
+ - action: hide
+ condition: not ('experimental' in version:feature_groups)
+ type: checkbox
+ value: false
+ weight: 11
+ metadata:
+ label: Storage
+ weight: 60
+ objects_ceph:
+ description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+ Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+ label: Ceph RadosGW for objects (Swift API)
+ restrictions:
+ - settings:storage.images_ceph.value == false
+ type: checkbox
+ value: false
+ weight: 80
+ osd_pool_size:
+ description: Configures the default number of object replicas in Ceph. This
+ number must be equal to or lower than the number of deployed 'Storage - Ceph
+ OSD' nodes.
+ label: Ceph object replication factor
+ regex:
+ error: Invalid number
+ source: ^[1-9]\d*$
+ type: text
+ value: '2'
+ weight: 85
+ volumes_ceph:
+ description: Configures Cinder to store volumes in Ceph RBD images.
+ label: Ceph RBD for volumes (Cinder)
+ restrictions:
+ - settings:storage.volumes_lvm.value == true
+ type: checkbox
+ value: true
+ weight: 20
+ volumes_lvm:
+ description: It is recommended to have at least one Storage - Cinder LVM node.
+ label: Cinder LVM over iSCSI for volumes
+ restrictions:
+ - settings:storage.volumes_ceph.value == true
+ type: checkbox
+ value: false
+ weight: 10
+ syslog:
+ metadata:
+ label: Syslog
+ weight: 50
+ syslog_port:
+ description: Remote syslog port
+ label: Port
+ regex:
+ error: Invalid Syslog port
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '514'
+ weight: 20
+ syslog_server:
+ description: Remote syslog hostname
+ label: Hostname
+ type: text
+ value: ''
+ weight: 10
+ syslog_transport:
+ label: Syslog transport protocol
+ type: radio
+ value: tcp
+ values:
+ - data: udp
+ description: ''
+ label: UDP
+ - data: tcp
+ description: ''
+ label: TCP
+ weight: 30
+ workloads_collector:
+ enabled:
+ type: hidden
+ value: true
+ metadata:
+ label: Workloads Collector User
+ restrictions:
+ - action: hide
+ condition: 'true'
+ weight: 10
+ password:
+ type: password
+ value: pBkLbu1k
+ tenant:
+ type: text
+ value: services
+ user:
+ type: text
+ value: fuel_stats_user
diff --git a/fuel/deploy/templates/virtual_environment/conf/dha.yaml b/fuel/deploy/templates/virtual_environment/conf/dha.yaml
new file mode 100644
index 000000000..6d476b874
--- /dev/null
+++ b/fuel/deploy/templates/virtual_environment/conf/dha.yaml
@@ -0,0 +1,38 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version:
+created:
+comment: Config for Virtual Environment
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# The mandatory property is id; all other properties are adapter-specific.
+
+nodes:
+- id: 1
+ libvirtName: controller1
+ libvirtTemplate: templates/virtual_environment/vms/controller.xml
+- id: 2
+ libvirtName: compute1
+ libvirtTemplate: templates/virtual_environment/vms/compute.xml
+- id: 3
+ libvirtName: compute2
+ libvirtTemplate: templates/virtual_environment/vms/compute.xml
+- id: 4
+ libvirtName: compute3
+ libvirtTemplate: templates/virtual_environment/vms/compute.xml
+- id: 5
+ libvirtName: fuel-master
+ libvirtTemplate: templates/virtual_environment/vms/fuel.xml
+ isFuel: yes
+ username: root
+ password: r00tme
+
+virtNetConfDir: templates/virtual_environment/networks
+
+disks:
+ fuel: 50G
+ controller: 50G
+ compute: 50G
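The disks section above only declares sizes; the deploy scripts normally create the actual backing files before the libvirt domains boot. A minimal sketch of the equivalent step with qemu-img, assuming a hypothetical image directory and dha.yaml path:

    import subprocess
    import yaml

    with open('dha.yaml') as f:  # hypothetical path to this DHA file
        dha = yaml.safe_load(f)

    for role, size in dha['disks'].items():
        path = '/var/lib/libvirt/images/%s.raw' % role  # hypothetical directory
        subprocess.check_call(['qemu-img', 'create', '-f', 'raw', path, size])
        print 'created %s (%s)' % (path, size)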
diff --git a/fuel/deploy/libvirt/networks/fuel1 b/fuel/deploy/templates/virtual_environment/networks/fuel1.xml
index 7b2b15423..7b2b15423 100644
--- a/fuel/deploy/libvirt/networks/fuel1
+++ b/fuel/deploy/templates/virtual_environment/networks/fuel1.xml
diff --git a/fuel/deploy/libvirt/networks/fuel2 b/fuel/deploy/templates/virtual_environment/networks/fuel2.xml
index 615c92094..615c92094 100644
--- a/fuel/deploy/libvirt/networks/fuel2
+++ b/fuel/deploy/templates/virtual_environment/networks/fuel2.xml
diff --git a/fuel/deploy/libvirt/networks/fuel3 b/fuel/deploy/templates/virtual_environment/networks/fuel3.xml
index 2383e6c1f..2383e6c1f 100644
--- a/fuel/deploy/libvirt/networks/fuel3
+++ b/fuel/deploy/templates/virtual_environment/networks/fuel3.xml
diff --git a/fuel/deploy/libvirt/networks/fuel4 b/fuel/deploy/templates/virtual_environment/networks/fuel4.xml
index 5b69f912d..5b69f912d 100644
--- a/fuel/deploy/libvirt/networks/fuel4
+++ b/fuel/deploy/templates/virtual_environment/networks/fuel4.xml
diff --git a/fuel/deploy/libvirt/vms/compute b/fuel/deploy/templates/virtual_environment/vms/compute.xml
index 75915090c..fbef4bda7 100644
--- a/fuel/deploy/libvirt/vms/compute
+++ b/fuel/deploy/templates/virtual_environment/vms/compute.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>compute4</name>
+ <name>compute</name>
<memory unit='KiB'>8388608</memory>
<currentMemory unit='KiB'>8388608</currentMemory>
<vcpu placement='static'>2</vcpu>
@@ -7,7 +7,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
- <bootmenu enable='yes'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/deploy/libvirt/vms/controller b/fuel/deploy/templates/virtual_environment/vms/controller.xml
index a87126296..3ff28218d 100644
--- a/fuel/deploy/libvirt/vms/controller
+++ b/fuel/deploy/templates/virtual_environment/vms/controller.xml
@@ -1,12 +1,13 @@
<domain type='kvm'>
- <name>controller1</name>
- <memory unit='KiB'>2097152</memory>
- <currentMemory unit='KiB'>2097152</currentMemory>
+ <name>controller</name>
+ <memory unit='KiB'>8388608</memory>
+ <currentMemory unit='KiB'>8388608</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/deploy/libvirt/vms/fuel-master b/fuel/deploy/templates/virtual_environment/vms/fuel.xml
index f4e652bf2..1a3286001 100644
--- a/fuel/deploy/libvirt/vms/fuel-master
+++ b/fuel/deploy/templates/virtual_environment/vms/fuel.xml
@@ -1,5 +1,5 @@
<domain type='kvm'>
- <name>fuel-master</name>
+ <name>fuel</name>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
diff --git a/fuel/deploy/transplant_fuel_settings.py b/fuel/deploy/transplant_fuel_settings.py
index bb4f9b6d7..d2aece87a 100644
--- a/fuel/deploy/transplant_fuel_settings.py
+++ b/fuel/deploy/transplant_fuel_settings.py
@@ -1,3 +1,13 @@
+###############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# szilard.cserey@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+###############################################################################
+
+
import sys
import common
import io
@@ -6,12 +16,16 @@ from dea import DeploymentEnvironmentAdapter
check_file_exists = common.check_file_exists
+ASTUTE_YAML = '/etc/fuel/astute.yaml'
+
+
def usage():
print '''
Usage:
python transplant_fuel_settings.py <deafile>
'''
+
def parse_arguments():
if len(sys.argv) != 2:
usage()
@@ -20,6 +34,7 @@ def parse_arguments():
check_file_exists(dea_file)
return dea_file
+
def transplant(dea, astute):
fuel_conf = dea.get_fuel_config()
for key in fuel_conf.iterkeys():
@@ -30,17 +45,17 @@ def transplant(dea, astute):
astute[key] = fuel_conf[key]
return astute
+
def main():
dea_file = parse_arguments()
- astute_yaml = '/etc/fuel/astute.yaml'
- check_file_exists(astute_yaml)
+ check_file_exists(ASTUTE_YAML)
dea = DeploymentEnvironmentAdapter(dea_file)
- with io.open(astute_yaml) as stream:
+ with io.open(ASTUTE_YAML) as stream:
astute = yaml.load(stream)
transplant(dea, astute)
- with io.open(astute_yaml, 'w') as stream:
+ with io.open(ASTUTE_YAML, 'w') as stream:
yaml.dump(astute, stream, default_flow_style=False)
if __name__ == '__main__':
- main() \ No newline at end of file
+ main()
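+
+# Usage sketch (run on the Fuel master; the single argument is the DEA file,
+# e.g. dea.yaml):
+#
+#   python transplant_fuel_settings.py dea.yaml
+#
+# The fuel section of the DEA file is merged into /etc/fuel/astute.yaml.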
diff --git a/fuel/docs/src/build-instructions.rst b/fuel/docs/src/build-instructions.rst
index b05164f98..790bcf513 100644
--- a/fuel/docs/src/build-instructions.rst
+++ b/fuel/docs/src/build-instructions.rst
@@ -1,6 +1,6 @@
-========================================================================
-OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno release
-========================================================================
+============================================================================
+OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno SR1 release
+============================================================================
.. contents:: Table of Contents
:backlinks: none
@@ -8,31 +8,34 @@ OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno release
Abstract
========
-This document describes how to build the Fuel deployment tool for the Arno release of OPNFV, the build system, dependencies and required system resources.
+This document describes how to build the Fuel deployment tool for the Arno SR1 release of OPNFV, the build system, dependencies and required system resources.
License
=======
-Arno release of OPNFV when using Fuel as a deployment tool DOCs (c) by Jonas Bjurel (Ericsson AB)
+Arno SR1 release of OPNFV when using Fuel as a deployment tool DOCs (c) by Jonas Bjurel (Ericsson AB)
-Arno release of OPNFV when using Fuel as a deployment tool DOCs (c) are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno SR1 release of OPNFV when using Fuel as a deployment tool DOCs (c) are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
Version history
===============
-+--------------------+--------------------+--------------------+--------------------+
-| **Date** | **Ver.** | **Author** | **Comment** |
-| | | | |
-+--------------------+--------------------+--------------------+--------------------+
-| 2015-06-03 | 1.0.0 | Jonas Bjurel | Instructions for |
-| | | (Ericsson AB) | the Arno release |
-+--------------------+--------------------+--------------------+--------------------+
++--------------------+--------------------+--------------------+----------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+----------------------+
+| 2015-06-03 | 1.0.0 | Jonas Bjurel | Instructions for |
+| | | (Ericsson AB) | the Arno release |
++--------------------+--------------------+--------------------+----------------------+
+| 2015-09-24 | 1.1.0 | Jonas Bjurel | Instructions for |
+| | | (Ericsson AB) | the Arno SR1 release |
++--------------------+--------------------+--------------------+----------------------+
Introduction
============
-This document describes the build system used to build the Fuel deployment tool for the Arno release of OPNFV, required dependencies and minimum requirements on the host to be used for the buildsystem.
+This document describes the build system used to build the Fuel deployment tool for the Arno SR1 release of OPNFV, required dependencies and minimum requirements on the host to be used for the build system.
The Fuel build system is designed around Docker containers such that dependencies outside of the build system can be kept to a minimum. It also shields the host from any potentially dangerous operations performed by the build system.
@@ -100,9 +103,9 @@ Now it is time to clone the code repository:
Now you should have the OPNFV genesis repository with the Fuel directories stored locally on your build host.
-Check out the Arno release:
+Check out the Arno SR1 release:
<cd genesis>
-<git checkout arno.2015.1.0>
+<git checkout arno.2015.2.0>
Building
========
@@ -170,7 +173,7 @@ References
-
:Authors: Jonas Bjurel (Ericsson)
-:Version: 1.0.0
+:Version: 1.1.0
**Documentation tracking**
diff --git a/fuel/docs/src/installation-instructions.rst b/fuel/docs/src/installation-instructions.rst
index 56699e9c3..aedbb53d5 100644
--- a/fuel/docs/src/installation-instructions.rst
+++ b/fuel/docs/src/installation-instructions.rst
@@ -9,13 +9,13 @@ OPNFV Installation instructions for the Arno release of OPNFV when using Fuel as
Abstract
========
-This document describes how to install the Arno release of OPNFV when using Fuel as a deployment tool covering it's limitations, dependencies and required system resources.
+This document describes how to install the Arno SR1 release of OPNFV when using Fuel as a deployment tool, covering its limitations, dependencies and required system resources.
License
=======
-Arno release of OPNFV when using Fuel as a deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
+Arno SR1 release of OPNFV when using Fuel as a deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
-Arno release of OPNFV when using Fuel as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno SR1 release of OPNFV when using Fuel as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
Version history
===============
@@ -27,52 +27,61 @@ Version history
| 2015-06-03 | 1.0.0 | Jonas Bjurel | Installation |
| | | (Ericsson AB) | instructions for |
| | | | the Arno release |
+| | | | |
+| 2015-09-27 | 1.1.0 | Daniel Smith | ARNO SR1-RC1 |
+| | | (Ericsson AB) | update |
+| | | | |
+| | | | |
+--------------------+--------------------+--------------------+--------------------+
Introduction
============
-This document describes providing guidelines on how to install and configure the Arno release of OPNFV when using Fuel as a deployment tool including required software and hardware configurations.
+This document provides guidelines on how to install and configure the Arno SR1 release of OPNFV when using Fuel as a deployment tool, including required software and hardware configurations.
-Although the available installation options gives a high degree of freedom in how the system is set-up including architecture, services and features, etc. said permutations may not provide an OPNFV compliant reference architecture. This instruction provides a step-by-step guide that results in an OPNFV Arno compliant deployment.
+Although the available installation options give a high degree of freedom in how the system is set up, including architecture, services and features, such permutations may not provide an OPNFV compliant reference architecture. This instruction provides a step-by-step guide that results in an OPNFV Arno SR1 compliant deployment.
The audience of this document is assumed to have good knowledge in networking and Unix/Linux administration.
Preface
=======
-Before starting the installation of the Arno release of OPNFV when using Fuel as a deployment tool, some planning must be done.
+Before starting the installation of the Arno SR1 release of OPNFV when using Fuel as a deployment tool, some planning must be done.
Retrieving the ISO image
------------------------
-First of all, the Fuel deployment ISO image needs to be retrieved, the .iso image of the Arno release of OPNFV when using Fuel as a deployment tool can be found at http://artifacts.opnfv.org/arno.2015.1.0/fuel/arno.2015.1.0.fuel.iso
+First of all, the Fuel deployment ISO image needs to be retrieved, the .iso image of the Arno SR1 release of OPNFV when using Fuel as a deployment tool can be found at http://artifacts.opnfv.org/arno.2015.2.0/fuel/arno.2015.2.0.fuel.iso
+
Building the ISO image
----------------------
+
Alternatively, you may build the .iso from source by cloning the opnfv/genesis git repository. To retrieve the repository for the Arno release use the following command:
-<git clone https://<linux foundation uid>@gerrit.opnf.org/gerrit/genesis>
+- git clone https://<linux foundation uid>@gerrit.opnfv.org/gerrit/genesis
-Check-out the Arno release tag to set the branch to the baseline required to replicate the Arno release:
+Check-out the Arno SR1 release tag to set the branch to the baseline required to replicate the Arno SR1 release:
-<cd genesis; git checkout arno.2015.1.0>
+- cd genesis; git checkout arno.2015.2.0
Go to the fuel directory and build the .iso:
-<cd fuel/build; make all>
+- cd fuel/build; make all
+
+For more information on how to build, please see the "OPNFV Build instructions for - Fuel deployment tool - OPNFV Arno SR1 release", which you retrieved with the repository at </genesis/fuel/docs/src/build-instructions.rst>
-For more information on how to build, please see "OPNFV Build instructions for - Arno release of OPNFV when using Fuel as a deployment tool which you retrieved with the repository at </genesis/fuel/docs/src/build-instructions.rst>
+Next, familiarize yourself with the Fuel 6.1 version by reading the following documents:
-Next, familiarize yourself with the Fuel 6.0.1 version by reading the following documents:
+- Fuel planning guide <https://docs.mirantis.com/openstack/fuel/fuel-6.1/planning-guide.html>
-- Fuel planning guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/planning-guide.html#planning-guide>
+- Fuel user guide <http://docs.mirantis.com/openstack/fuel/fuel-6.1/user-guide.html#user-guide>
-- Fuel user guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/user-guide.html#user-guide>
+- Fuel operations guide <http://docs.mirantis.com/openstack/fuel/fuel-6.1/operations.html#operations-guide>
-- Fuel operations guide <http://docs.mirantis.com/openstack/fuel/fuel-6.0/operations.html#operations-guide>
+- Fuel Plugin Developers Guide <https://wiki.openstack.org/wiki/Fuel/Plugins>
A number of deployment specific parameters must be collected, those are:
@@ -84,27 +93,40 @@ A number of deployment specific parameters must be collected, those are:
4. Provider NTP addresses
+5. Network topology you plan to deploy (VLAN, GRE(VXLAN), FLAT).
+
+6. Linux distro you intend to deploy.
+
+7. How many nodes and what roles you want to deploy (Controllers, Storage, Computes).
+
+8. Monitoring options you want to deploy (Ceilometer, MongoDB).
+
+9. Other options not covered in the document are available in the links above.
+
+
This information will be needed for the configuration procedures provided in this document.
Hardware requirements
=====================
-The following minimum hardware requirements must be met for the installation of Arno using Fuel:
+The following minimum hardware requirements must be met for the installation of Arno SR1 using Fuel:
+--------------------+------------------------------------------------------+
| **HW Aspect** | **Requirement** |
| | |
+--------------------+------------------------------------------------------+
-| **# of servers** | Minimum 5 (3 for non redundant deployment): |
+| **# of nodes** | Minimum 6 (3 for non-redundant deployment): |
| | |
| | - 1 Fuel deployment master (may be virtualized) |
| | |
| | - 3(1) Controllers |
| | |
| | - 1 Compute |
-+--------------------+------------------------------------------------------+
-| **CPU** | Minimum 1 socket x86_AMD64 Ivy bridge 1.6 GHz |
| | |
+| | - 1 Ceilometer (VM option) |
++--------------------+------------------------------------------------------+
+| **CPU** | Minimum 1 socket x86_AMD64 with Virtualization |
+| | support |
+--------------------+------------------------------------------------------+
| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
| | |
@@ -112,14 +134,30 @@ The following minimum hardware requirements must be met for the installation of
| **Disk** | Minimum 256GB 10kRPM spinning disks |
| | |
+--------------------+------------------------------------------------------+
-| **NICs** | - 2(1)x10GE Niantec for Private/Public (Redundant) |
+| **Networks** | 4 Tagged VLANs (PUBLIC, MGMT, STORAGE, PRIVATE) |
| | |
-| | - 2(1)x10GE Niantec for SAN (Redundant) |
-| | |
-| | - 2(1)x1GE for admin (PXE) and control (RabitMQ,etc) |
+| | 1 Un-Tagged VLAN for PXE Boot - ADMIN Network |
| | |
+| | note: These can be run on a single NIC - or spread |
+| | out over other NICs as your hardware supports |
+--------------------+------------------------------------------------------+
+Help with Hardware Requirements
+===============================
+
+
+Calculate hardware requirements:
+
+You can use the Fuel Hardware Calculator <https://www.mirantis.com/openstack-services/bom-calculator/> to calculate the hardware required for your OpenStack environment.
+
+When choosing the hardware on which you will deploy your OpenStack environment, you should think about:
+
+ - CPU -- Consider the number of virtual machines that you plan to deploy in your cloud environment and the CPU per virtual machine.
+ - Memory -- Depends on the amount of RAM assigned per virtual machine and the controller node.
+ - Storage -- Depends on the local drive space per virtual machine, remote volumes that can be attached to a virtual machine, and object storage.
+ - Networking -- Depends on the chosen network topology, the network bandwidth per virtual machine, and network storage.
+
+
Top of the rack (TOR) Configuration requirements
================================================
@@ -127,8 +165,7 @@ The switching infrastructure provides connectivity for the OPNFV infrastructure
The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All the networks involved in the OPNFV infrastructure as well as the provider networks and the private tenant VLANs needs to be manually configured.
-
-Manual configuration of the Arno hardware platform should be carried out according to the Pharos specification http://artifacts.opnfv.org/arno.2015.1.0/docs/pharos-spec.arno.2015.1.0.pdf
+Manual configuration of the Arno SR1 hardware platform should be carried out according to the Pharos specification TODO-<insert link to Pharos ARNO SR1 Specification>
OPNFV Software installation and deployment
==========================================
@@ -137,35 +174,29 @@ This section describes the installation of the OPNFV installation server (Fuel m
Install Fuel master
-------------------
-1. Mount the built arno.2015.1.0.fuel.iso file as a boot device to the jump host server.
+1. Mount the Arno SR1 ISO file as a boot device to the jump host server.
2. Reboot the jump host to establish the Fuel server.
- The system now boots from the ISO image.
-3. Change the grub boot parameters
-
- - When the grub boot menu shows up - Press Tab to edit the kernel parameters
-
- - Change <showmenu=no> to <showmenu=yes>.
-
- - Change <netmask=255.255.255.0> to <netmask=255.255.0.0>.
+ - Select 'DVD Fuel Install (Static IP)'
- Press [Enter].
-4. Wait until screen Fuel setup is shown (Note: This can take up to 30 minutes).
+3. Wait until the Fuel setup screen is shown (Note: This can take up to 30 minutes).
-5. Select PXE Setup and change the following fields to appropriate values (example below):
+4. Configure DHCP/Static IP information for your FUEL node - For example, ETH0 is 10.20.0.2/24 for FUEL booting and ETH1 is DHCP in your corporate/lab network.
- - Static Pool Start 10.20.0.3
+ - Configure eth1 or other network interfaces here as well (if you have them present on your FUEL server).
- - Static Pool End 10.20.0.254
+5. Select PXE Setup and change the following fields to appropriate values (example below):
- - DHCP Pool Start 10.20.128.3
+ - DHCP Pool Start 10.20.0.3
- - DHCP Pool End 10.20.128.254
+ - DHCP Pool End 10.20.0.254
-6. Select DNS & Hostname and change the following fields to appropriate values:
+ - DHCP Pool Gateway 10.20.0.2 (ip of Fuel node)
- Hostname <OPNFV Region name>-fuel
@@ -175,7 +206,7 @@ Install Fuel master
- Hostname to test DNS <Hostname to test DNS>
-7. Select Time Sync and change the following fields to appropriate values:
+6. Select Time Sync and change the following fields to appropriate values:
- NTP Server 1 <Customer NTP server 1>
@@ -183,67 +214,94 @@ Install Fuel master
- NTP Server 3 <Customer NTP server 3>
- **Note: This step is only to pass the network sanity test, the actual ntp parameters will be set with the pre-deploy script.**
-
-8. Start the installation.
+7. Start the installation.
- Select Quit Setup and press Save and Quit.
- Installation starts, wait until a screen with logon credentials is shown.
- Note: This will take about 15 minutes.
+
+Boot the Node Servers
+---------------------
+
+After the Fuel Master node has rebooted from the above step and is at the login prompt, boot the node servers (your Compute/Control/Storage blades, nested or real) with a PXE booting scheme so that the Fuel
+Master can pick them up for control.
+
+8. Enable PXE booting
+
+ - For every controller and compute server: enable PXE Booting as the first boot device in the BIOS boot order menu and hard disk as the second boot device in the same menu.
+
+9. Reboot all the control and compute blades.
+
+10. Wait for the availability of nodes showing up in the Fuel GUI.
+
+ - Wait until all nodes are displayed in top right corner of the Fuel GUI: <total number of server> TOTAL NODES and <total number of servers> UNALLOCATED NODES.
+
+
+
+Install ODL Plugin on FUEL node
+-------------------------------
+
+11. SSH to your FUEL node (e.g. root@10.20.0.2 pwd: r00tme)
+
+12. Verify the plugin exists at /opt/opnfv/opendaylight-0.6-0.6.1-1.noarch.rpm
+
+13. Install the plugin with the command
+
+ - "fuel plugins --install /opt/opnfv/opendaylight-0.6-0.6.1-1.noarch.rpm"
+
+ - Expected output: "Plugin opendaylight-0.6-0.6.1-1.noarch.rpm was successfully installed."
+
Create an OPNFV Environment
---------------------------
-9. Connect to Fuel with a browser towards port 8000
+14. Connect to the Fuel Web UI with a browser at http://<ip of fuel server>:8000 (login admin/admin)
-10. Create and name a new OpenStack environment, to be installed.
+15. Create and name a new OpenStack environment, to be installed.
-11. Select <Juno on Ubuntu> or <Juno on CentOS> as per your which in the "OpenStack Release" field.
+16. Select <Juno on Ubuntu> or <Juno on CentOS> as you wish in the "OpenStack Release" field and press "Next"
-12. Select deployment mode.
+17. Select compute virtualization method.
- - Select the Multi-node with HA.
+ - Select KVM as hypervisor (or one of your choosing) and press "Next"
-13. Select compute node mode.
+18. Select network mode.
- - Select KVM as hypervisor (unless you're not deploying bare metal or nested KVM/ESXI).
+ - Select Neutron with GRE segmentation and press "Next"
-14. Select network mode.
+ Note: this is the supported method when using the ODL installation; other options will not work with the plugin and this instruction set.
- - Select Neutron with VLAN segmentation
+19. Select Storage Back-ends.
- ** Note: This will later be overridden to VXLAN by OpenDaylight.**
+ - Select "Yes, use Ceph" if you intend to deploy Ceph Backends and press "Next"
-15. Select Storage Back-ends.
- - Select Ceph for Cinder and default for glance.
+20. Select additional services you wish to install.
-16. Select additional services.
+ - Check option <Install Ceilometer (OpenStack Telemetry)> and press "Next"
+ Note: If you use Ceilometer and you only have 5 nodes, you may have to run in a 3/1/1 (controller/ceilo-mongo/compute) configuration. Consider adding more compute nodes.
- - Check option <Install Celiometer (OpenStack Telemetry)>.
+21. Create the new environment.
-17. Create the new environment.
+ - Click "Create" Button
Configure the OPNFV environment
-------------------------------
-18. Enable PXE booting
+22. Enable PXE booting (if you haven't done this already)
- For every controller and compute server: enable PXE Booting as the first boot device in the BIOS boot order menu and hard disk as the second boot device in the same menu.
-19. Reboot all the control and compute blades.
-
-20. Wait for the availability of nodes showing up in the Fuel GUI.
+23. Wait for the availability of nodes showing up in the Fuel GUI.
- Wait until all nodes are displayed in top right corner of the Fuel GUI: <total number of server> TOTAL NODES and <total number of servers> UNALLOCATED NODES.
-21. Open the environment you previously created.
+24. Open the environment you previously created.
-22. Open the networks tab.
+25. Open the networks tab.
-23. Update the public network configuration.
+26. Update the Public network configuration.
Change the following fields to appropriate values:
@@ -253,57 +311,67 @@ Configure the OPNFV environment
- CIDR to <CIDR for Public IP Addresses>
- - Gateway to <Gateway for Public IP Addresses>
-
- Check VLAN tagging.
- Set appropriate VLAN id.
-24. Update the management network configuration.
+ - Gateway to <Gateway for Public IP Addresses>
+
+ - Set floating ip ranges
+
+
+27. Update the Storage Network Configuration
+
+ - Set CIDR to appropriate value (default 192.168.1.0/24)
- - Set CIDR to 172.16.255.128/25 (or as per your which).
+ - Set VLAN to appropriate value (default 102)
+
+28. Update the Management network configuration.
+
+ - Set CIDR to appropriate value (default 192.168.0.0/24)
- Check VLAN tagging.
- - Set appropriate VLAN id.
+ - Set appropriate VLAN id. (default 101)
-25. Update the Neutron L2 configuration.
+29. Update the Private Network Information
- - Set VLAN ID range.
+ - Set CIDR to appropriate value (default 192.168.2.0/24)
-26. Update the Neutron L3 configuration.
+ - Check and set VLAN tag appropriately (default 103)
+
+30. Update the Neutron L3 configuration.
- Set Internal network CIDR to an appropriate value
- Set Internal network gateway to an appropriate value
- - Set Floating IP ranges.
-
- - Set DNS Servers
+ - Set Guest OS DNS Server values appropriately
-27. Save Settings.
+31. Save Settings.
-28. Click "verify network" to check the network set-up consistency and connectivity
+32. Click on the "Nodes" Tab in the FUEL WEB UI.
-29. Update the storage configuration.
+33. Assign roles.
-30. Open the nodes tab.
+ - Click on "+Add Nodes" button
-31. Assign roles.
+ - Check "Controller" and the "Storage-Ceph OSD" in the Assign Roles Section
- - Check <Controller and Telemetry MongoDB>.
-
- - Check the three servers you want to be installed as Controllers in pane <Assign Role>.
+ - Check the 3 Nodes you want to act as Controllers from the bottom half of the screen
- Click <Apply Changes>.
- - Check <Compute>.
+ - Click on "+Add Nodes" button
+
+ - Check "Compute" in the Assign Roles Section
- - Check nodes to be installed as compute nodes in pane Assign Role.
+ - Check the Nodes that you want to act as Computes from the bottom half of the screen
- Click <Apply Changes>.
-32. Configure interfaces.
+
+34. Configure interfaces.
- Check Select <All> to select all nodes with Control, Telemetry, MongoDB and Compute node roles.
@@ -313,67 +381,84 @@ Configure the OPNFV environment
- Assign interfaces (bonded) for mgmt-, admin-, private-, public- and storage networks
-Deploy the OPNFV environment
-----------------------------
-**NOTE: Before the deployment is performed, the OPNFV pre-deploy script must be run**
+ - Note: Set MTU level to at least MTU=2090 (recommended MTU=2140 for SDN over VXLAN usage) for each network
-35. Run the pre-deploy script.
- Log on as root to the Fuel node.
- Print Fuel environment Id (fuel env)
- #> id | status | name | mode | release_id | changes <id>| new | <CEE Region name>| ha_compact | 2 | <ite specific information>
+ - Click Apply
-36. Run the pre-deployment script (/opt/opnfv/pre-deploy.sh <id>)
- As prompted for-, set the DNS servers to go into /etc/resolv.conf.
- As prompted for-, set any Hosts file additions for controllers and compute nodes. You will be prompted for name, FQDN and IP for each entry. Press return when prompted for a name when you have completed your input.
- As prompted for-, set NTP upstream configuration for controllers. You will be prompted for a NTP server each entry. Press return when prompted for a NTP server when you have completed your input.
+Enable ODL
+----------
-37. Deploy the environment.
- In the Fuel GUI, click Deploy Changes.
+35. In the FUEL UI of your Environment, click the "Settings" Tab
-Installation health-check
-=========================
+ - Enable OpenStack debug logging (in the Common Section) - optional
+
+ - Check the OpenDaylight Lithium Plugin Section
+
+ - Check to enable VXLAN
+
+ - Modify VNI and Port Range if desired
+
+ - Click "Save Settings" at the bottom to Save.
+
+
+OPTIONAL - Set Local Mirror Repos
+---------------------------------
+
+The following steps can be executed if you are in an environment with no connection to the internet. The Fuel server delivers a local repo that can be used for
+installation/deployment of OpenStack.
+
+36. In the Fuel UI of your Environment, click the Settings Tab and scroll to the Repositories Section.
+
+ - Replace the URI values for the "Name" values outlined below:
+
+ - "ubuntu" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
+ - "ubuntu-security" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
+ - "ubuntu-updates" URI="deb http://<ip-of-fuel-server>:8080/ubuntu-part trusty main"
+ - "mos-updates" URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-updates main restricted"
+ - "mos-security" URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-security main restricted"
+ - "mos-holdback" URI="deb http://<ip-of-fuel-server>:8080/mos-ubuntu mos6.1-holdback main restricted"
-38. Perform system health-check
-Now that the OPNFV environment has been created, and before the post installation configurations is started, perform a system health check from the Fuel GUI:
+ - Click "Save Settings" at the bottom to Save your changes
-- Select the “Health check” TAB.
-- Select all test cases
-- And click “Run tests”
+Verify Networks
+---------------
-All test cases should pass.
+It is important that Verify Networks is run, as it ensures that you can not only communicate on the networks you have set up, but also fetch the packages needed for a successful
+deployment.
+
+37. From the FUEL UI in your Environment, Select the Networks Tab
+
+ - At the bottom of the page, Select "Verify Networks"
+
+ - Continue to fix your topology (physical switch, etc.) until the "Verification Succeeded - Your network is configured correctly" message is shown
+
+Deploy Your Environment
+-----------------------
+
+38. Deploy the environment.
+ In the Fuel GUI, click Deploy Changes.
+
+ - Wait until your Environment is deployed and the Horizon URI to connect is displayed in the FUEL GUI for your Environment
+
+Installation health-check
+=========================
-Post installation and deployment actions
-========================================
+39. Perform system health-check
-Activate OpenDaylight and VXLAN network segmentation
-----------------------------------------------------
-** Note: With the current release, the OpenDaylight option is experimental!**
-** Note: With ODL enabled, L3 features will no longer be available **
-The activation of ODL within a deployed Fuel system is a two part process.
+ - Click the "Health Check" tab inside your Environment in the FUEL Web UI
-The first part involves staging the ODL container, i.e. starting the ODL container itself.
-The second part involves a reconfiguration of the underlying networking components to enable VXLAN tunneling.
-The staging of the ODL container works without manual intervention except for editing with a valid DNS IP for your system
+ - Check "Select All" and Click "Run Tests"
-For the second part - the reconfiguration of the networking, the script <config_net_odl.sh> is provided as a baseline example to show what needs to be configured for your system system setup. Since there are many variants of valid networking topologies, this script will not be 100% correct in all deployment cases and some manual script modifications maybe required.
+ Note: The Live-Migration test will fail (bug in ODL currently); if you prefer not to see the error message, simply uncheck this test in the list
-39. Enable the ODL controller
-ssh to any of the OpenStack controllers and issue the following command as root user: </opt/opnfv/odl/stage_odl.sh>
-This script will start ODL, load modules and make the Controller ready for use.
-** Note: - The script should only be ran on a single controller (even if the system is setup in a High Availability OpenStack mode). **
+ - Allow tests to run and investigate results where appropriate
40. Verify that the OpenDaylight GUI is accessible
-Point your browser to the following URL: <http://{ODL-CONTROLLER-IP}:8181/dlux/index.html> and login:
-Username: Admin
-Password: Admin
-41. Reconfiguring the networking and switch to VXLAN network segmentation
-ssh to all of the nodes and issue the following command </opt/opnfv/odl/config_net_odl.sh> in the order specified below:
-a. All compute nodes
-b. All OpenStack controller nodes except the one running the ODL-controller
-c. The OpenStack controller also running the ODL controller
+Point your browser to the following URL: <http://{Controller-VIP}:8181/index.html> and login:
-This script will reconfigure the networking from VLAN Segregation to VXLAN mode.
+ - Username: admin
+ - Password: admin
References
==========
@@ -402,11 +487,11 @@ Fuel
`Fuel documentation <https://wiki.openstack.org/wiki/Fuel>`_
-:Authors: Jonas Bjurel (Ericsson AB)
-:Version: 1.0.0
+:Authors: Daniel Smith (Ericsson AB)
+:Version: 1.1.0
**Documentation tracking**
Revision: _sha1_
-Build date: _date_
+Build date: _date_
diff --git a/fuel/docs/src/release-notes.rst b/fuel/docs/src/release-notes.rst
index 220e80b8a..435789767 100644
--- a/fuel/docs/src/release-notes.rst
+++ b/fuel/docs/src/release-notes.rst
@@ -1,6 +1,6 @@
-=====================================================================================
-OPNFV Release Note for the Arno release of OPNFV when using Fuel as a deployment tool
-=====================================================================================
+=========================================================================================
+OPNFV Release Note for the Arno SR1 release of OPNFV when using Fuel as a deployment tool
+=========================================================================================
.. contents:: Table of Contents
@@ -10,14 +10,14 @@ OPNFV Release Note for the Arno release of OPNFV when using Fuel as a deployment
Abstract
========
-This document compiles the release notes for the Arno release of OPNFV when using Fuel as a deployment tool.
+This document compiles the release notes for the Arno SR1 release of OPNFV when using Fuel as a deployment tool.
License
=======
-Arno release with the Fuel deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
+Arno SR1 release with the Fuel deployment tool Docs (c) by Jonas Bjurel (Ericsson AB)
-Arno release with the Fuel deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno SR1 release with the Fuel deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
Version history
===============
@@ -29,6 +29,9 @@ Version history
| 2015-06-03 | 1.0.0 | Jonas Bjurel | Arno SR0 release |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-09-28 | 1.1.3 | Jonas Bjurel | Arno SR1 release |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
Important notes
===============
@@ -40,7 +43,7 @@ Carefully follow the installation-instructions and pay special attention to the
Summary
=======
-For Arno, the typical use of Fuel as an OpenStack installer is supplemented with OPNFV unique components such as `OpenDaylight <http://www.opendaylight.org/software>`_ version Helium as well as OPNFV-unique configurations.
+For Arno SR1, the typical use of Fuel as an OpenStack installer is supplemented with OPNFV unique components such as `OpenDaylight <http://www.opendaylight.org/software>`_ version Lithium as well as OPNFV-unique configurations.
This Arno artefact provides Fuel as the deployment stage tool in the OPNFV CI pipeline including:
@@ -60,16 +63,16 @@ Release Data
| **Project** | genesis/bgs |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | genesis/arno.2015.1.0 |
+| **Repo/tag** | genesis/arno.2015.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Arno Base Service release 0 (SR0) |
+| **Release designation** | Arno Base Service release 1 (SR1) |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2015-06-04 |
+| **Release date** | 2015-10-01 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Arno Base SR0 release |
+| **Purpose of the delivery** | OPNFV Arno Base SR1 release |
| | |
+--------------------------------------+--------------------------------------+
@@ -78,15 +81,15 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked release of genesis/fuel. It is based on following upstream versions:
+This is the second tracked release of genesis/fuel. It is based on the following upstream versions:
-- Fuel 6.0.1
+- Fuel 6.1.0
- OpenStack Juno release
-- OpenDaylight Helium-SR3
+- OpenDaylight Lithium release
Document version changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the fuel installer for OPNFV. It comes with the following documentation:
+This is the second tracked version of the fuel installer for OPNFV. It comes with the following documentation:
- OPNFV Installation instructions for Arno with Fuel as deployment tool
- OPNFV Release Notes for Arno use of Fuel as deployment tool
@@ -102,10 +105,10 @@ Feature additions
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA:- | Baselining Fuel 6.0.1 for OPNFV |
+| JIRA: FUEL-4 | Baselining Fuel 6.0.1 for OPNFV |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA:- | Integration of OpenDaylight |
+| JIRA: FUEL-17 | Integration of OpenDaylight |
| | |
+--------------------------------------+--------------------------------------+
@@ -118,8 +121,17 @@ Bug corrections
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
+| JIRA: BGS-57 | The OpenDaylight Helium release is |
+| | not fully functional and the |
+| | resulting Fuel integration is not |
+| | able to cope with the deficiencies. |
+| | It is therefore not recommended |
+| | to enable this option. |
+| | A functional integration of ODL |
+| | version: Lithium is expected to be |
+| | available in an upcoming service |
+| | release. |
| | |
-| - | - |
+--------------------------------------+--------------------------------------+
Deliverables
@@ -127,13 +139,13 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
-Fuel-based installer iso file <arno.2015.1.0.fuel.iso>
+Fuel-based installer iso file <arno.2015.2.0.fuel.iso>
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for Arno release with the Fuel deployment tool - ver. 1.0.0
-- OPNFV Build instructions for Arno release with the Fuel deployment tool - ver. 1.0.0
-- OPNFV Release Note for Arno release with the Fuel deployment tool - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for Arno release with the Fuel deployment tool - ver. 1.1.0
+- OPNFV Build instructions for Arno release with the Fuel deployment tool - ver. 1.1.0
+- OPNFV Release Note for Arno release with the Fuel deployment tool - ver. 1.1.3 (this document)
Known Limitations, Issues and Workarounds
=========================================
@@ -159,63 +171,32 @@ Known issues
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: BGS-57 | The OpenDaylight Helium release is |
-| | not fully functional and the |
-| | resulting Fuel integration is not |
-| | able to cope with the deficiancies. |
-| | It is therefore not recommended to |
-| | to enable this option. |
-| | A functional integration of ODL |
-| | version: Lithium is expected to be |
-| | available in an upcomming service |
-| | release. |
-| | |
+| JIRA: FUEL-43 | VMs not accessible through SSH due |
+| | to VXLAN 50 Byte overhead and lack |
+| | of proper MTU value setting on |
+| | virtual ethernet devices |
++--------------------------------------+--------------------------------------+
+| JIRA: FUEL-44 | The CentOS 6.5 option has not been |
+| | sufficiently verified |
+--------------------------------------+--------------------------------------+
+
Workarounds
-----------
-Current workaround for the JIRA: BGS-57 is not to enable OpenDaylight networking - see installation instructions.
+See JIRA: `FUEL-43 <https://jira.opnfv.org/browse/FUEL-43>`_
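+
+As an illustrative mitigation only: raising the MTU on the affected virtual ethernet devices, e.g. "ip link set dev eth1 mtu 2140", leaves room for the 50 byte VXLAN overhead (see the MTU note in the installation instructions).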
Test Result
===========
-
-Arno release with the Fuel deployment tool has undergone QA test runs with the following results:
-
-+--------------------------------------+--------------------------------------+
-| **TEST-SUITE** | **Results:** |
-| | |
-+--------------------------------------+--------------------------------------+
-| Tempest test suite 1: | 27 out of 105 testcases fails |
-| | see note (1) and note (2) |
-+--------------------------------------+--------------------------------------+
-| Tempest test suite 2: | 26 out of 100 testcases fails |
-| | see note (1) and note (2) |
-+--------------------------------------+--------------------------------------+
-| Tempest test suite 3: | 14 out of 106 testcases fails |
-| | see note (1) and note (2) |
-+--------------------------------------+--------------------------------------+
-| Rally test suite suie 1: | 10 out of 18 testcases fails |
-| | see note (1) and note (3) |
-+--------------------------------------+--------------------------------------+
-| ODL test suite suie | 7 out of 7 testcases fails |
-| | see note (1) and note (4) |
-+--------------------------------------+--------------------------------------+
-| vPING | OK |
-| | see note (1) |
-+--------------------------------------+--------------------------------------+
-
-** - Note (1): Have been run with ODL controller active but not with integrated ODL networking VXLAN segmentation activated **
-** - Note (2): see https://wiki.opnfv.org/r1_tempest **
-** - Note (3): see https://wiki.opnfv.org/r1_rally_bench **
-** - Note (4): see https://wiki.opnfv.org/r1_odl_suite **
+Arno SR1 release with the Fuel deployment tool has undergone QA test runs with the following results:
+https://wiki.opnfv.org/arno_sr1_result_page?rev=1443626728
References
==========
For more information on the OPNFV Arno release, please see http://wiki.opnfv.org/releases/arno.
:Authors: Jonas Bjurel (Ericsson)
-:Version: 1.0.0
+:Version: 1.1.3
**Documentation tracking**
diff --git a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml
index 9e7042732..25de4b98e 100644
--- a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml
+++ b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml
@@ -205,7 +205,7 @@ network:
gateway: 172.30.9.1
ip_ranges:
- - 172.30.9.70
- - 172.30.9.70
+ - 172.30.9.79
meta:
assign_vip: true
cidr: 172.16.0.0/24
diff --git a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml
index fd0e7b3e8..3abbdce93 100644
--- a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml
+++ b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml
@@ -205,7 +205,7 @@ network:
gateway: 172.30.9.1
ip_ranges:
- - 172.30.9.70
- - 172.30.9.70
+ - 172.30.9.79
meta:
assign_vip: true
cidr: 172.16.0.0/24
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
index 099c21e68..ad5d4d1c8 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute4
@@ -9,6 +9,7 @@
<boot dev='network'/>
<boot dev='hd'/>
<bootmenu enable='yes'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
index 76569e0ca..39059066c 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/compute5
@@ -8,6 +8,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1 b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
index 715d4c4b4..ca1bd3b05 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/controller1
@@ -8,6 +8,7 @@
<type arch='x86_64' machine='pc-1.0'>hvm</type>
<boot dev='network'/>
<boot dev='hd'/>
+ <bios rebootTimeout='30000'/>
</os>
<features>
<acpi/>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
index 9ff801706..d2a78411c 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
@@ -98,6 +98,5 @@
<address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
</memballoon>
</devices>
- <seclabel type='dynamic' model='apparmor' relabel='yes'/>
</domain>
diff --git a/opensteak/ci/build.sh b/opensteak/ci/build.sh
index e69de29bb..7a853329a 100644
--- a/opensteak/ci/build.sh
+++ b/opensteak/ci/build.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+#placeholder
diff --git a/opensteak/ci/deploy.sh b/opensteak/ci/deploy.sh
index e69de29bb..bd6ff862c 100644
--- a/opensteak/ci/deploy.sh
+++ b/opensteak/ci/deploy.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+# TODO: find a way to create the openvswitch bridge
+
+
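+# A possible sketch for the TODO above (untested; assumes the
+# openvswitch-switch package is installed, bridge/port names are examples):
+# sudo ovs-vsctl --may-exist add-br br-libvirt
+# sudo ovs-vsctl --may-exist add-port br-libvirt em2
+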
+# This will create a Foreman Virtual Machine with KVM (libvirt)
+cd ../tools/
+sudo python3 create_foreman.py --config ../config/infra.yaml
+
+
diff --git a/opensteak/config/common.yaml b/opensteak/config/common.yaml
new file mode 100644
index 000000000..144e84ff7
--- /dev/null
+++ b/opensteak/config/common.yaml
@@ -0,0 +1,119 @@
+# common.yaml
+---
+
+###
+## OpenStack passwords
+###
+ceph_password: "password"
+admin_password: "password"
+mysql_service_password: "password"
+mysql_root_password: "password"
+rabbitmq_password: "password"
+glance_password: "password"
+nova_password: "password"
+neutron_shared_secret: "password"
+neutron_password: "password"
+cinder_password: "password"
+keystone_admin_token: "password"
+horizon_secret_key: "12345"
+
+domain: "infra.opensteak.fr"
+
+###
+## Class parameters
+###
+# Rabbit
+opensteak::rabbitmq::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+
+# MySQL
+opensteak::mysql::root_password: "%{hiera('mysql_root_password')}"
+opensteak::mysql::mysql_password: "%{hiera('mysql_service_password')}"
+
+# Key
+opensteak::key::password: "%{hiera('admin_password')}"
+opensteak::key::stack_domain: "%{hiera('domain')}"
+
+# Keystone
+opensteak::keystone::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::keystone::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::keystone::keystone_token: "%{hiera('keystone_admin_token')}"
+opensteak::keystone::stack_domain: "%{hiera('domain')}"
+opensteak::keystone::admin_mail: "admin@opensteak.fr"
+opensteak::keystone::admin_password: "%{hiera('admin_password')}"
+opensteak::keystone::glance_password: "%{hiera('glance_password')}"
+opensteak::keystone::nova_password: "%{hiera('nova_password')}"
+opensteak::keystone::neutron_password: "%{hiera('neutron_password')}"
+opensteak::keystone::cinder_password: "%{hiera('cinder_password')}"
+
+# Glance
+opensteak::glance::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::glance::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::glance::stack_domain: "%{hiera('domain')}"
+opensteak::glance::glance_password: "%{hiera('glance_password')}"
+
+# Nova
+opensteak::nova::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::nova::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::nova::stack_domain: "%{hiera('domain')}"
+opensteak::nova::nova_password: "%{hiera('nova_password')}"
+opensteak::nova::neutron_password: "%{hiera('neutron_password')}"
+opensteak::nova::neutron_shared: "%{hiera('neutron_shared_secret')}"
+
+# Cinder
+opensteak::cinder::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::cinder::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::cinder::stack_domain: "%{hiera('domain')}"
+opensteak::cinder::nova_password: "%{hiera('cinder_password')}"
+
+# Compute
+opensteak::nova-compute::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::nova-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::nova-compute::stack_domain: "%{hiera('domain')}"
+opensteak::nova-compute::neutron_password: "%{hiera('neutron_password')}"
+
+
+# Neutron controller
+opensteak::neutron-controller::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-controller::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-controller::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-controller::nova_password: "%{hiera('nova_password')}"
+opensteak::neutron-controller::neutron_password: "%{hiera('neutron_password')}"
+# Neutron compute
+opensteak::neutron-compute::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-compute::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-compute::neutron_password: "%{hiera('neutron_password')}"
+opensteak::neutron-compute::neutron_shared: "%{hiera('neutron_shared_secret')}"
+opensteak::neutron-compute::infra_nodes:
+ server186:
+ ip: 192.168.1.27
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server187:
+ ip: 192.168.1.155
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server188:
+ ip: 192.168.1.116
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server189:
+ ip: 192.168.1.117
+ bridge_uplinks:
+ - 'br-vm:p3p1'
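+# A further compute node would follow the same shape (hypothetical values):
+# server190:
+# ip: 192.168.1.118
+# bridge_uplinks:
+# - 'br-vm:p3p1'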
+# Neutron network
+opensteak::neutron-network::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-network::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-network::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-network::neutron_password: "%{hiera('neutron_password')}"
+opensteak::neutron-network::neutron_shared: "%{hiera('neutron_shared_secret')}"
+opensteak::neutron-network::infra_nodes:
+ server98:
+ ip: 192.168.1.58
+ bridge_uplinks:
+ - 'br-ex:em2'
+ - 'br-vm:em5'
+
+# Horizon
+opensteak::horizon::stack_domain: "%{hiera('domain')}"
+opensteak::horizon::secret_key: "%{hiera('horizon_secret_key')}"
diff --git a/opensteak/config/infra.yaml b/opensteak/config/infra.yaml
new file mode 100644
index 000000000..2ff02a1dd
--- /dev/null
+++ b/opensteak/config/infra.yaml
@@ -0,0 +1,81 @@
+domains: "infra.opensteak.fr"
+media: "Ubuntu mirror"
+environments: "production"
+operatingsystems: "Ubuntu14.04Cloud"
+subnets: "Admin"
+compute_profiles: "Test"
+smart_proxies: "foreman.infra.opensteak.fr"
+ptables: "Preseed default"
+architectures: "x86_64"
+
+operatingsystems:
+ "Ubuntu 14.04.2 LTS":
+ name: "Ubuntu"
+ description: "Ubuntu 14.04.2 LTS"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+ "Ubuntu 14.04 Cloud":
+ name: "Ubuntu14.04Cloud"
+ description: "Ubuntu 14.04 Cloud"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+
+hostgroupTop:
+ name: 'test'
+ classes:
+ - "ntp"
+ subnet: "Admin"
+ params:
+ password: 'toto'
+hostgroups:
+ hostgroupController:
+ name: 'controller'
+ classes:
+ - "opensteak::base-network"
+ - "opensteak::libvirt"
+ params:
+ foreman_sshkey: 'xxxx'
+ hostgroupControllerVM:
+ name: 'controller_VM'
+ classes:
+ - "opensteak::apt"
+ params:
+ foreman_sshkey: 'xxxx'
+ password: 'toto'
+ hostgroupCompute:
+ name: 'compute'
+ classes:
+ - "opensteak::neutron-compute"
+ - "opensteak::nova-compute"
+subnets:
+ Admin:
+ shared: False
+ data:
+ network: "192.168.4.0"
+ mask: "255.255.255.0"
+ vlanid:
+ gateway: "192.168.4.1"
+ dns_primary: "192.168.1.4"
+ from: "192.168.4.10"
+ to: "192.168.4.200"
+ ipam: "DHCP"
+ boot_mode: "DHCP"
+
+foreman:
+ ip: "192.168.4.2"
+ admin: "admin"
+ password: "opnfv"
+ cpu: "4"
+ ram: "4194304"
+ iso: "trusty-server-cloudimg-amd64-disk1.img"
+ disksize: "5G"
+ force: True
+ dns: "8.8.8.8"
+ bridge: "br-libvirt"
+ bridge_type: "openvswitch"
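+
+# Note: create_foreman.py only special-cases bridge_type "openvswitch"
+# (emitting a <virtualport type='openvswitch'/> element in the domain XML);
+# any other value is treated as a plain Linux bridge with no extra
+# configuration.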
diff --git a/opensteak/tools/README.rst b/opensteak/tools/README.rst
new file mode 100644
index 000000000..188addc99
--- /dev/null
+++ b/opensteak/tools/README.rst
@@ -0,0 +1,52 @@
+:Authors: Arnaud Morin (arnaud1.morin@orange.com)
+:Version: 0.0.2
+
+=======================================================
+OPNFV Installation instructions using Foreman/OpenSteak
+=======================================================
+
+Abstract
+========
+
+This document describes how to set up OPNFV from a Foreman virtual machine on an Ubuntu server.
+
+License
+=======
+OPNFV Installation instructions using Foreman/OpenSteak (c) by Arnaud Morin (Orange)
+
+OPNFV Installation instructions using Foreman/OpenSteak are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-08 | 0.0.1 | Arnaud Morin | First draft |
+| | | (Orange) | |
++--------------------+--------------------+--------------------+--------------------+
+
+Table of contents
+===================
+
+.. contents::
+ :backlinks: none
+
+Introduction
+============
+
+This document describes how to set up OPNFV from a Foreman virtual machine on an Ubuntu server.
+Before starting, you should have an Ubuntu 14.04 LTS server already installed.
+
+Here is the manual workflow that you will have to perform:
+
+- Install
+- Manually prepare configuration files from templates.
+
+
+Here is the current workflow of the automated installation:
+
+- Dependencies installation (such as libvirt, ipmitool, etc.)
+- Foreman Virtual Machine creation
+- to be completed
diff --git a/opensteak/tools/config.yaml b/opensteak/tools/config.yaml
new file mode 100644
index 000000000..c618a5203
--- /dev/null
+++ b/opensteak/tools/config.yaml
@@ -0,0 +1,78 @@
+domains: "test-infra.opensteak.fr"
+media: "Ubuntu mirror"
+environments: "production"
+operatingsystems: "Ubuntu14.04Cloud"
+subnets: "Admin"
+compute_profiles: "Test"
+smart_proxies: "foreman.infra.opensteak.fr"
+ptables: "Preseed default"
+architectures: "x86_64"
+
+operatingsystems:
+ "Ubuntu 14.04.1 LTS":
+ name: "Ubuntu"
+ description: "Ubuntu 14.04.1 LTS"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+ "Ubuntu 14.04 Cloud":
+ name: "Ubuntu14.04Cloud"
+ description: "Ubuntu 14.04 Cloud"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+
+hostgroupTop:
+ name: 'test'
+ classes:
+ - "ntp"
+ subnet: "Admin"
+ params:
+ password: 'toto'
+hostgroups:
+ hostgroupController:
+ name: 'controller'
+ classes:
+ - "opensteak::base-network"
+ - "opensteak::libvirt"
+ params:
+ foreman_sshkey: 'xxxx'
+ hostgroupControllerVM:
+ name: 'controller_VM'
+ classes:
+ - "opensteak::apt"
+ params:
+ foreman_sshkey: 'xxxx'
+ password: 'toto'
+ hostgroupCompute:
+ name: 'compute'
+ classes:
+ - "opensteak::neutron-compute"
+ - "opensteak::nova-compute"
+subnets:
+ Admin:
+ shared: False
+ data:
+ network: "172.16.0.0"
+ mask: "255.255.255.0"
+ vlanid:
+ gateway: "172.16.0.1"
+ dns_primary: "172.16.0.1"
+ from: "172.16.0.10"
+ to: "172.16.0.200"
+ ipam: "DHCP"
+ boot_mode: "DHCP"
+
+foreman:
+ ip: "172.16.0.2"
+ password: "opnfv"
+ cpu: "2"
+ ram: "2097152"
+ iso: "trusty-server-cloudimg-amd64-disk1.img"
+ disksize: "5G"
+ force: True
+ dns: "8.8.8.8"
diff --git a/opensteak/tools/create_foreman.py b/opensteak/tools/create_foreman.py
new file mode 100644
index 000000000..6cf4510e4
--- /dev/null
+++ b/opensteak/tools/create_foreman.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Create Virtual Machines
+"""
+
+# TODO: be sure that we are running as root
+
+from opensteak.conf import OpenSteakConfig
+from opensteak.printer import OpenSteakPrinter
+# from opensteak.argparser import OpenSteakArgParser
+from opensteak.templateparser import OpenSteakTemplateParser
+from opensteak.virsh import OpenSteakVirsh
+from pprint import pprint as pp
+# from ipaddress import IPv4Address
+import argparse
+import tempfile
+import shutil
+import os
+# import sys
+
+p = OpenSteakPrinter()
+
+#
+# Check for params
+#
+p.header("Check parameters")
+args = {}
+
+# Update args with values from CLI
+parser = argparse.ArgumentParser(description='This script will create a foreman VM.', usage='%(prog)s [options]')
+parser.add_argument('-c', '--config', help='YAML config file to use (default is config/infra.yaml).', default='config/infra.yaml')
+args.update(vars(parser.parse_args()))
+
+# Open config file
+conf = OpenSteakConfig(config_file=args["config"])
+# pp(conf.dump())
+
+a = {}
+a["name"] = "foreman"
+a["ip"] = conf["foreman"]["ip"]
+a["netmask"] = conf["subnets"]["Admin"]["data"]["mask"]
+a["netmaskshort"] = sum([bin(int(x)).count('1')
+ for x in conf["subnets"]["Admin"]
+ ["data"]["mask"]
+ .split('.')])
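+# For example, a mask of "255.255.255.0" gives a short netmask of 24 (8+8+8+0).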
+a["gateway"] = conf["subnets"]["Admin"]["data"]["gateway"]
+a["network"] = conf["subnets"]["Admin"]["data"]["network"]
+a["admin"] = conf["foreman"]["admin"]
+a["password"] = conf["foreman"]["password"]
+a["cpu"] = conf["foreman"]["cpu"]
+a["ram"] = conf["foreman"]["ram"]
+a["iso"] = conf["foreman"]["iso"]
+a["disksize"] = conf["foreman"]["disksize"]
+a["force"] = conf["foreman"]["force"]
+a["dhcprange"] = "{0} {1}".format(conf["subnets"]["Admin"]
+ ["data"]["from"],
+ conf["subnets"]["Admin"]
+ ["data"]["to"])
+a["domain"] = conf["domains"]
+reverse_octets = str(conf["foreman"]["ip"]).split('.')[-2::-1]
+a["reversedns"] = '.'.join(reverse_octets) + '.in-addr.arpa'
+a["dns"] = conf["foreman"]["dns"]
+a["bridge"] = conf["foreman"]["bridge"]
+if conf["foreman"]["bridge_type"] == "openvswitch":
+ a["bridgeconfig"] = "<virtualport type='openvswitch'></virtualport>"
+else:
+ # no specific config for linuxbridge
+ a["bridgeconfig"] = ""
+
+# Update args with values from config file
+args.update(a)
+del a
+
+p.list_id(args)
+
+# Ask confirmation
+if args["force"] is not True:
+ p.ask_validation()
+
+# Create the VM
+p.header("Initiate configuration")
+
+###
+# Work on templates
+###
+# Create temporary folders and files
+tempFolder = tempfile.mkdtemp(dir="/tmp")
+tempFiles = {}
+
+for f in os.listdir("templates_foreman/"):
+ tempFiles[f] = "{0}/{1}".format(tempFolder, f)
+ try:
+ OpenSteakTemplateParser("templates_foreman/{0}".format(f),
+ tempFiles[f], args)
+ except Exception as err:
+ p.status(False, msg=("Something went wrong when trying to create "
+ "the file {0} from the template "
+ "templates_foreman/{1}").format(tempFiles[f], f),
+ failed="{0}".format(err))
+
+###
+# Work on files
+###
+for f in os.listdir("files_foreman/"):
+ tempFiles[f] = "{0}/{1}".format(tempFolder, f)
+ shutil.copyfile("files_foreman/{0}".format(f), tempFiles[f])
+
+p.status(True, msg="Temporary files created:")
+p.list_id(tempFiles)
+
+
+###
+# Delete if already exists
+###
+
+# Get all volumes and VM
+p.header("Virsh calls")
+virsh = OpenSteakVirsh()
+volumeList = virsh.volumeList()
+domainList = virsh.domainList()
+# p.list_id(volumeList)
+# p.list_id(domainList)
+
+# TODO: check that the default image is in the list
+# (trusty-server-cloudimg-amd64-disk1.img by default)
+
+# Delete the volume if exists
+try:
+ oldVolume = volumeList[args["name"]]
+
+ # Ask confirmation
+ if args["force"] is not True:
+ p.ask_validation()
+
+    status = virsh.volumeDelete(volumeList[args["name"]])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+except KeyError as err:
+ # no old volume, do nothing
+ pass
+
+# Delete the VM if exists
+try:
+ vmStatus = domainList[args["name"]]
+
+ # Ask confirmation
+ if args["force"] is not True:
+ p.ask_validation()
+
+ # Destroy (stop)
+ if vmStatus == "running":
+        status = virsh.domainDestroy(args["name"])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+
+ # Undefine (delete)
+    status = virsh.domainUndefine(args["name"])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+except KeyError as err:
+ # no old VM defined, do nothing
+ pass
+
+###
+# Create the configuration image file from metadata and userdata
+###
+status = virsh.generateConfiguration(args["name"], tempFiles)
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=("Configuration generated successfully in "
+ "/var/lib/libvirt/images/{0}-configuration.iso")
+ .format(args["name"]))
+
+# Refresh the pool
+status = virsh.poolRefresh()
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+###
+# Create the new VM
+###
+# Create the volume from a clone
+status = virsh.volumeClone(args["iso"], args["name"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+# Resize the volume
+status = virsh.volumeResize(args["name"], args["disksize"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+# Create the VM
+status = virsh.domainDefine(tempFiles["kvm-config"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+
+###
+# Start the VM
+###
+status = virsh.domainStart(args["name"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+p.status(True, msg="Log file is at: /var/log/libvirt/qemu/{0}-serial.log"
+ .format(args["name"]))
+
+p.header("fini")
+
+# Delete temporary dir
+shutil.rmtree(tempFolder)
diff --git a/opensteak/tools/files_foreman/id_rsa b/opensteak/tools/files_foreman/id_rsa
new file mode 100644
index 000000000..d53ba8874
--- /dev/null
+++ b/opensteak/tools/files_foreman/id_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAz0jMplucYXoe0xJ21ASL98PGbwZYCI5Xr4/kHXOdGvHvZr3z
+58tWU1Ta4qMf0qa272VsdQiO1pCmSlqrDW5C9rEeqLhhRX/yLbgv35mOdjRoIIAX
+6RfNniT/xXrfvPZYdw603fIbbw5igTRwc6W5QvJHRcKRKb762Vw2gPSS0GgFBLCk
+vC2kQbW4cfP+9elo86FAhNBs2TbBHLc9H2W+9KzYfgsigjJLsgRXL6/uhu3+sL2d
+3F1J9Nhyy3aoUOVxD2YPJlJvzYhLZcSXgXI+Oi0gZmhh3uImc4WRyOihK5jRpJaw
+desygyXo4lVskzxBjm7L9ynbCNMOO85ZVVJGxQIDAQABAoIBAQCaOWcSy4yRtiPj
+FZTV8MAXS1GD36t2SjoRhLTL+O5GUwW1YtVrfA2xmKv2/jm6KJJpkgPdG83y9NLU
+9ZrZNlWaaHQQQocVB7ovrB/qdLzbU+i5bbTcl/pDlPG8g8yeMoflpUqK7AzfV0uR
+KGwWj5JErjC7RaVt8wt+164xykbFyZeUu9htNthFD/OPaIPqgv6AoJdEULyGrTbd
+SRyJ01n0beGkB0o+0dnOEO34K+pU0Zzk+rAcOEl3UNkpxOzedEFOR6NdnX1eH4t4
+a6OZgskcVjyxFQPAyhcSkQ2iWncQx2ritTclst4NFjBae5hwYgEB4S9ZN5IOueMH
+eYhxYthNAoGBAPXtSDmRGPc4EHDBrbgDn4vhxK7QN35bWFW1KvHLD0hBBJO57GqT
+jGCJsbkw6peERuFV8qq+Bvz0nvlKl9humB1djlndUETksUTrNz73XxpJJ8L5parF
+okx0QLMXONOP5b6yGWYay3QD0gNz/HYVf//oDTdWRhbq5EY6VarOagfjAoGBANfG
+UrlxEYHwq3TE7unvgaao5Vpmw8Hqir2bnl2zKmPoV8ds/V+paMnV6Hhzgzu3bKgF
+ukZgAizEcfvxrxnfIraRJTI5xgBoIl8gdbsWkLre4qKpVSAkw4JLyzVVlXCyKYHp
+ocjeNVbO5Z2Yft0cv30LfeX+DEDeQS12RHLu/Sc3AoGBAMns2ZfC5p/encknje8A
+spjVeHwdJOOQNxiwl6FPHK40DIELcO4VVnbRuGaZnpVoHBbbTlQZkX1TkdCZCdLB
+BA9giQiKamUW7eLry0HdNW5M0OQLvZZZjih+b71c/ODhTz/j1mz65UDN/jutmYaP
+orjJnUhpg0U/+s0bCsojj/YHAoGBAKtsMhiFjaUv8OdJ9Y0A7H3dPKk/b1JF5YeR
+dJV4W7sXwXT8T6eKTWfce14GV0JADSDHvB9g8xlh0DSa48OoFEn6shRe9cEo+fWd
+Mis6WC0+Gcukv65TxsdjM8PhhGIOCQ/e7ttIPhQDN0Sm/FLqHe9YC+OGm3GFoT5e
+8S5mU9StAoGABFwqkFELU84twzKYJCVPZPktwtfrD0Hkbd9pk0ebuSnQ3bATFIyU
+CDspTADbY2IgC53u+XAhTd5BOsicTtMM9x1p5EOglbK1ANagWuGlzVfdbp+bmql9
+S8AaH22lha5vCfHHfAN2NSkQ+ABZnNpP66nFx06VcyEYkhuZgd6s5A0=
+-----END RSA PRIVATE KEY-----
diff --git a/opensteak/tools/files_foreman/id_rsa.pub b/opensteak/tools/files_foreman/id_rsa.pub
new file mode 100644
index 000000000..8b4c6a12d
--- /dev/null
+++ b/opensteak/tools/files_foreman/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPSMymW5xheh7TEnbUBIv3w8ZvBlgIjlevj+Qdc50a8e9mvfPny1ZTVNriox/SprbvZWx1CI7WkKZKWqsNbkL2sR6ouGFFf/ItuC/fmY52NGgggBfpF82eJP/Fet+89lh3DrTd8htvDmKBNHBzpblC8kdFwpEpvvrZXDaA9JLQaAUEsKS8LaRBtbhx8/716WjzoUCE0GzZNsEctz0fZb70rNh+CyKCMkuyBFcvr+6G7f6wvZ3cXUn02HLLdqhQ5XEPZg8mUm/NiEtlxJeBcj46LSBmaGHe4iZzhZHI6KErmNGklrB16zKDJejiVWyTPEGObsv3KdsI0w47zllVUkbF arnaud@l-bibicy
diff --git a/opensteak/tools/opensteak/__init__.py b/opensteak/tools/opensteak/__init__.py
new file mode 100644
index 000000000..01f9c9a12
--- /dev/null
+++ b/opensteak/tools/opensteak/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+# This directory is a Python package.
diff --git a/opensteak/tools/opensteak/argparser.py b/opensteak/tools/opensteak/argparser.py
new file mode 100644
index 000000000..de980b6b6
--- /dev/null
+++ b/opensteak/tools/opensteak/argparser.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+# @author: Pawel Chomicki <pawel.chomicki@nokia.com>
+
+"""
+Parse arguments from CLI
+"""
+
+import argparse
+
+class OpenSteakArgParser:
+
+ def __init__(self):
+ """
+ Parse the command line
+ """
+        self.parser = argparse.ArgumentParser(description='This script will create config files for a VM in the current folder.', usage='%(prog)s [options] name')
+ self.parser.add_argument('name', help='Set the name of the machine')
+ self.parser.add_argument('-i', '--ip', help='Set the ip address of the machine. (Default is 192.168.42.42)', default='192.168.42.42')
+ self.parser.add_argument('-n', '--netmask', help='Set the netmask in short format. (Default is 24)', default='24')
+ self.parser.add_argument('-g', '--gateway', help='Set the gateway to ping internet. (Default is 192.168.42.1)', default='192.168.42.1')
+ self.parser.add_argument('-p', '--password', help='Set the ssh password. Login is ubuntu. (Default password is moutarde)', default='moutarde')
+ self.parser.add_argument('-u', '--cpu', help='Set number of CPU for the VM. (Default is 2)', default='2')
+ self.parser.add_argument('-r', '--ram', help='Set quantity of RAM for the VM in kB. (Default is 2097152)', default='2097152')
+ self.parser.add_argument('-o', '--iso', help='Use this iso file. (Default is trusty-server-cloudimg-amd64-disk1.img)', default='trusty-server-cloudimg-amd64-disk1.img')
+ self.parser.add_argument('-d', '--disksize', help='Create a disk with that size. (Default is 5G)', default='5G')
+ self.parser.add_argument('-f', '--force', help='Force creation without asking questions. This is dangerous as it will delete old VM with same name.', default=False, action='store_true')
+
+ def parse(self):
+ return self.parser.parse_args()
+
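+# Sketch:
+#
+#     args = OpenSteakArgParser().parse()
+#     print(args.name, args.ip, args.disksize)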
diff --git a/opensteak/tools/opensteak/conf.py b/opensteak/tools/opensteak/conf.py
new file mode 100644
index 000000000..65eaf433b
--- /dev/null
+++ b/opensteak/tools/opensteak/conf.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from yaml import load, dump
+try:
+ from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+ from yaml import Loader, Dumper
+
+
+class OpenSteakConfig:
+ """OpenSteak config class
+ Use this object as a dict
+ """
+
+ def __init__(self,
+ config_file="/usr/local/opensteak/infra/config/common.yaml",
+ autosave=False):
+ """ Function __init__
+ Load saved opensteak config.
+
+        @param config_file: the yaml config file to read.
+            default is '/usr/local/opensteak/infra/config/common.yaml'
+        @param autosave: save the config automatically on destroy
+            default is False
+ """
+ self.config_file = config_file
+ self.autosave = autosave
+ with open(self.config_file, 'r') as stream:
+ self._data = load(stream, Loader=Loader)
+
+ def __getitem__(self, index):
+ """Get an item of the configuration"""
+ return self._data[index]
+
+ def __setitem__(self, index, value):
+ """Set an item of the configuration"""
+ self._data[index] = value
+
+ def list(self):
+        """List the configuration keys"""
+ return self._data.keys()
+
+ def dump(self):
+ """Dump the configuration"""
+ return dump(self._data, Dumper=Dumper)
+
+ def save(self):
+ """Save the configuration to the file"""
+ with open(self.config_file, 'w') as f:
+ f.write(dump(self._data, Dumper=Dumper))
+
+ def __del__(self):
+ if self.autosave:
+ self.save()
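+
+# Minimal usage sketch (the config path is illustrative):
+#
+#     conf = OpenSteakConfig(config_file='config.yaml')
+#     print(conf['foreman']['ip'])
+#     conf['foreman']['cpu'] = '4'
+#     conf.save()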
diff --git a/opensteak/tools/opensteak/foreman.py b/opensteak/tools/opensteak/foreman.py
new file mode 100644
index 000000000..b7cbf42de
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.api import Api
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.domains import Domains
+from opensteak.foreman_objects.smart_proxies import SmartProxies
+from opensteak.foreman_objects.operatingsystems import OperatingSystems
+from opensteak.foreman_objects.hostgroups import HostGroups
+from opensteak.foreman_objects.hosts import Hosts
+from opensteak.foreman_objects.architectures import Architectures
+from opensteak.foreman_objects.subnets import Subnets
+from opensteak.foreman_objects.puppetClasses import PuppetClasses
+from opensteak.foreman_objects.compute_resources import ComputeResources
+
+
+class OpenSteakForeman:
+ """
+    OpenSteakForeman class
+    Entry point to the Foreman API, exposing the main object collections
+    """
+ def __init__(self, password, login='admin', ip='127.0.0.1'):
+ """ Function __init__
+ Init the API with the connection params
+ @param password: authentication password
+        @param login: authentication login - default is admin
+ @param ip: api ip - default is localhost
+ @return RETURN: self
+ """
+ self.api = Api(login=login, password=password, ip=ip,
+ printErrors=False)
+ self.domains = Domains(self.api)
+ self.smartProxies = SmartProxies(self.api)
+ self.puppetClasses = PuppetClasses(self.api)
+ self.operatingSystems = OperatingSystems(self.api)
+ self.architectures = Architectures(self.api)
+ self.subnets = Subnets(self.api)
+ self.hostgroups = HostGroups(self.api)
+ self.hosts = Hosts(self.api)
+ self.computeResources = ComputeResources(self.api)
+ self.environments = ForemanObjects(self.api,
+ 'environments',
+ 'environment')
+ self.smartClassParameters = ForemanObjects(self.api,
+ 'smart_class_parameters',
+ 'smart_class_parameter')
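+
+# Usage sketch (credentials and IP are illustrative, matching config.yaml):
+#
+#     foreman = OpenSteakForeman(password='opnfv', ip='172.16.0.2')
+#     for domain in foreman.domains:
+#         print(domain['name'])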
diff --git a/opensteak/tools/opensteak/foreman_objects/__init__.py b/opensteak/tools/opensteak/foreman_objects/__init__.py
new file mode 100644
index 000000000..01f9c9a12
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+# This directory is a Python package.
diff --git a/opensteak/tools/opensteak/foreman_objects/api.py b/opensteak/tools/opensteak/foreman_objects/api.py
new file mode 100644
index 000000000..dc9973484
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/api.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import json
+import requests
+from requests_futures.sessions import FuturesSession
+from pprint import pformat
+
+
+class Api:
+ """
+ Api class
+ Class to deal with the foreman API v2
+ """
+ def __init__(self, password, login='admin', ip='127.0.0.1', printErrors=False):
+ """ Function __init__
+ Init the API with the connection params
+
+ @param password: authentication password
+        @param login: authentication login - default is admin
+ @param ip: api ip - default is localhost
+ @return RETURN: self
+ """
+ self.base_url = 'http://{}/api/v2/'.format(ip)
+ self.headers = {'Accept': 'version=2',
+ 'Content-Type': 'application/json; charset=UTF-8'}
+ self.auth = (login, password)
+ self.errorMsg = ''
+ self.printErrors = printErrors
+
+ def list(self, obj, filter=False, only_id=False, limit=20):
+ """ Function list
+ Get the list of an object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param filter: filter for objects
+        @param only_id: boolean to only return dict with name/id
+        @param limit: maximum number of results per page (default 20)
+        @return RETURN: the list of the object
+ """
+ self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
+ if filter:
+ self.url += '&search={}'.format(filter)
+ self.resp = requests.get(url=self.url, auth=self.auth,
+ headers=self.headers)
+ if only_id:
+ if self.__process_resp__(obj) is False:
+ return False
+ if type(self.res['results']) is list:
+ return dict((x['name'], x['id']) for x in self.res['results'])
+ elif type(self.res['results']) is dict:
+ r = {}
+ for v in self.res['results'].values():
+ for vv in v:
+ r[vv['name']] = vv['id']
+ return r
+ else:
+ return False
+ else:
+ return self.__process_resp__(obj)
+
+ def get(self, obj, id, sub_object=None):
+ """ Function get
+ Get an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @return RETURN: the targeted object
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ if sub_object:
+ self.url += '/' + sub_object
+ self.resp = requests.get(url=self.url, auth=self.auth,
+ headers=self.headers)
+ if self.__process_resp__(obj):
+ return self.res
+ return False
+
+ def get_id_by_name(self, obj, name):
+ """ Function get_id_by_name
+ Get the id of an object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+        @param name: the name of the object
+        @return RETURN: the id of the object, or False if not found
+ """
+        res = self.list(obj, filter='name = "{}"'.format(name),
+                        only_id=True, limit=1)
+        return res[name] if res and name in res else False
+
+ def set(self, obj, id, payload, action='', async=False):
+ """ Function set
+ Set an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @param action: specific action of an object ('power'...)
+ @param payload: the dict of the payload
+ @param async: should this request be async, if true use
+ return.result() to get the response
+ @return RETURN: the server response
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ if action:
+ self.url += '/{}'.format(action)
+ self.payload = json.dumps(payload)
+ if async:
+ session = FuturesSession()
+ return session.put(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ else:
+ self.resp = requests.put(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ if self.__process_resp__(obj):
+ return self.res
+ return False
+
+ def create(self, obj, payload, async=False):
+ """ Function create
+ Create an new object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param payload: the dict of the payload
+ @param async: should this request be async, if true use
+ return.result() to get the response
+ @return RETURN: the server response
+ """
+ self.url = self.base_url + obj
+ self.payload = json.dumps(payload)
+ if async:
+ session = FuturesSession()
+ return session.post(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ else:
+ self.resp = requests.post(url=self.url, auth=self.auth,
+ headers=self.headers,
+ data=self.payload)
+ return self.__process_resp__(obj)
+
+ def delete(self, obj, id):
+ """ Function delete
+ Delete an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @return RETURN: the server response
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ self.resp = requests.delete(url=self.url,
+ auth=self.auth,
+ headers=self.headers, )
+ return self.__process_resp__(obj)
+
+ def __process_resp__(self, obj):
+ """ Function __process_resp__
+ Process the response sent by the server and store the result
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @return RETURN: the server response
+ """
+ self.last_obj = obj
+ if self.resp.status_code > 299:
+ self.errorMsg = ">> Error {} for object '{}'".format(self.resp.status_code,
+ self.last_obj)
+ try:
+ self.ret = json.loads(self.resp.text)
+ self.errorMsg += pformat(self.ret[list(self.ret.keys())[0]])
+            except Exception:
+ self.ret = self.resp.text
+ self.errorMsg += self.ret
+ if self.printErrors:
+ print(self.errorMsg)
+ return False
+ self.res = json.loads(self.resp.text)
+ if 'results' in self.res.keys():
+ return self.res['results']
+ return self.res
+
+ def __str__(self):
+ ret = pformat(self.base_url) + "\n"
+ ret += pformat(self.headers) + "\n"
+ ret += pformat(self.auth) + "\n"
+ return ret
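+
+# Usage sketch (credentials are illustrative; the higher-level wrapper in
+# opensteak/foreman.py builds on this class):
+#
+#     api = Api(password='opnfv', login='admin', ip='172.16.0.2')
+#     print(api.list('hosts', only_id=True))      # {'host.fqdn': id, ...}
+#     print(api.get_id_by_name('domains', 'test-infra.opensteak.fr'))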
diff --git a/opensteak/tools/opensteak/foreman_objects/architectures.py b/opensteak/tools/opensteak/foreman_objects/architectures.py
new file mode 100644
index 000000000..5e4303e17
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/architectures.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Architectures(ForemanObjects):
+ """
+ Architectures class
+ """
+ objName = 'architectures'
+ payloadObj = 'architecture'
+
+ def checkAndCreate(self, key, payload, osIds):
+ """ Function checkAndCreate
+ Check if an architectures exists and create it if not
+
+ @param key: The targeted architectures
+ @param payload: The targeted architectures description
+ @param osIds: The list of os ids liked with this architecture
+ @return RETURN: The id of the object
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+        # To be sure the OS list is good, we ensure our OS ids are in the list
+ for os in self[key]['operatingsystems']:
+ osIds.add(os['id'])
+ self[key]["operatingsystem_ids"] = list(osIds)
+        if len(self[key]['operatingsystems']) != len(osIds):
+ return False
+ return oid
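+
+# Sketch (names and ids are illustrative; osIds is a set of OS ids the
+# architecture must be linked with):
+#
+#     arch_id = foreman.architectures.checkAndCreate(
+#         'x86_64', {'name': 'x86_64'}, {1})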
diff --git a/opensteak/tools/opensteak/foreman_objects/compute_resources.py b/opensteak/tools/opensteak/foreman_objects/compute_resources.py
new file mode 100644
index 000000000..9ada9c481
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/compute_resources.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ComputeResources(ForemanObjects):
+ """
+    ComputeResources class
+ """
+ objName = 'compute_resources'
+ payloadObj = 'compute_resource'
+
+ def list(self):
+ """ Function list
+        list the compute resources
+
+        @return RETURN: List of ForemanItem objects
+ """
+ return list(map(lambda x: ForemanItem(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+        Get a compute resource
+
+        @param key: The compute resource name or ID
+        @return RETURN: The ForemanItem object of a compute resource
+        """
+        # Compute resources do not support get-by-name, so resolve to an id
+ if type(key) is not int:
+ key = self.getId(key)
+ ret = self.api.get(self.objName, key)
+ return ForemanItem(self.api, key, ret)
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+        Delete a compute resource
+
+        @param key: The compute resource name or ID
+        @return RETURN: The API result
+        """
+        # Compute resources do not support get-by-name, so resolve to an id
+ if type(key) is not int:
+ key = self.getId(key)
+ return self.api.delete(self.objName, key)
diff --git a/opensteak/tools/opensteak/foreman_objects/domains.py b/opensteak/tools/opensteak/foreman_objects/domains.py
new file mode 100644
index 000000000..753833fc5
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/domains.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Domains(ForemanObjects):
+ """
+ Domain class
+ """
+ objName = 'domains'
+ payloadObj = 'domain'
+
+ def load(self, id='0', name=''):
+ """ Function load
+        To be rewritten
+
+ @param id: The Domain ID
+        @return RETURN: None
+ """
+
+ if name:
+            id = self.api.get_id_by_name('domains', name)
+        self.data = self.api.get('domains', id)
+ if 'parameters' in self.data:
+ self.params = self.data['parameters']
+ else:
+ self.params = []
+ self.name = self.data['name']
diff --git a/opensteak/tools/opensteak/foreman_objects/freeip.py b/opensteak/tools/opensteak/foreman_objects/freeip.py
new file mode 100644
index 000000000..86c003fec
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/freeip.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+#~ from foreman.api import Api
+import requests
+from bs4 import BeautifulSoup
+import sys
+import json
+
+class FreeIP:
+ """ FreeIP return an available IP in the targeted network """
+
+    def __init__(self, login, password):
+ """ Init: get authenticity token """
+ with requests.session() as self.session:
+ try:
+ #~ 1/ Get login token and authentify
+ payload = {}
+ log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/users/login', verify=False).text)
+ payload['utf8'] = log_soup.findAll('input',attrs={'name':'utf8'})[0].get('value')
+ payload['authenticity_token'] = log_soup.findAll('input',attrs={'name':'authenticity_token'})[0].get('value')
+                if payload['authenticity_token'] is None:
+                    raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+ payload['commit']='Login'
+ payload['login[login]'] = login
+ payload['login[password]'] = password
+ #~ 2/ Log in
+ r = self.session.post('https://127.0.0.1/users/login', verify=False, data=payload)
+ if r.status_code != 200:
+ raise requests.exceptions.RequestException("Bad login or password")
+ #~ Get token for host creation
+ log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/hosts/new', verify=False).text)
+                self.authenticity_token = log_soup.findAll('input', attrs={'name': 'authenticity_token'})[0].get('value')
+                if self.authenticity_token is None:
+                    raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+            except requests.exceptions.RequestException as e:
+                print("Error connecting to Foreman to get a free IP")
+                print(e)
+                sys.exit(1)
+
+    def get(self, subnet, mac=""):
+        """ Return a free IP in the given subnet """
+        payload = {"host_mac": mac, "subnet_id": subnet}
+        payload['authenticity_token'] = self.authenticity_token
+        try:
+            r = self.session.post('https://127.0.0.1/subnets/freeip', verify=False, data=payload)
+            self.last_ip = json.loads(r.text).get('ip')
+            if self.last_ip is None:
+                raise requests.exceptions.RequestException("Error getting free IP")
+        except requests.exceptions.RequestException as e:
+            print("Error connecting to Foreman to get a free IP")
+            print(e)
+            sys.exit(1)
+        return self.last_ip
+
+
+
+if __name__ == "__main__":
+ import pprint
+ import sys
+ if len(sys.argv) == 4:
+ f = FreeIP(sys.argv[1], sys.argv[2])
+ print(f.get(sys.argv[3]))
+ else:
+ print('Error: Usage\npython {} foreman_user foreman_password subnet'.format(sys.argv[0]))
diff --git a/opensteak/tools/opensteak/foreman_objects/hostgroups.py b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
new file mode 100644
index 000000000..55b8ba6b3
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.itemHostsGroup import ItemHostsGroup
+from pprint import pprint as pp
+
+
+class HostGroups(ForemanObjects):
+ """
+ HostGroups class
+ """
+ objName = 'hostgroups'
+ payloadObj = 'hostgroup'
+
+ def list(self):
+ """ Function list
+ list the hostgroups
+
+ @return RETURN: List of ItemHostsGroup objects
+ """
+ return list(map(lambda x: ItemHostsGroup(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+        Get a hostgroup
+
+        @param key: The hostgroup name or ID
+        @return RETURN: The ItemHostsGroup object of the hostgroup
+ """
+ # Because Hostgroup did not support get by name we need to do it by id
+ if type(key) is not int:
+ key = self.getId(key)
+ ret = self.api.get(self.objName, key)
+ return ItemHostsGroup(self.api, key, ret)
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+        Delete a hostgroup
+
+ @param key: The hostgroup name or ID
+ @return RETURN: The API result
+ """
+ # Because Hostgroup did not support get by name we need to do it by id
+ if type(key) is not int:
+ key = self.getId(key)
+ return self.api.delete(self.objName, key)
+
+ def checkAndCreate(self, key, payload,
+ hostgroupConf,
+ hostgroupParent,
+ puppetClassesId):
+ """ Function checkAndCreate
+        check and create procedure for a hostgroup
+ - check the hostgroup is not existing
+ - create the hostgroup
+ - Add puppet classes from puppetClassesId
+ - Add params from hostgroupConf
+
+ @param key: The hostgroup name or ID
+ @param payload: The description of the hostgroup
+ @param hostgroupConf: The configuration of the host group from the
+ foreman.conf
+ @param hostgroupParent: The id of the parent hostgroup
+ @param puppetClassesId: The dict of puppet classes ids in foreman
+        @return RETURN: The id of the hostgroup, or False on failure
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+
+ # Create Hostgroup classes
+ if 'classes' in hostgroupConf.keys():
+ if not self[key].checkAndCreateClasses(puppetClassesId.values()):
+ print("Failed in classes")
+ return False
+
+ # Set params
+ if 'params' in hostgroupConf.keys():
+ if not self[key].checkAndCreateParams(hostgroupConf['params']):
+ print("Failed in params")
+ return False
+
+ return oid
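+
+# Sketch (names, classes and ids are illustrative; hostgroupParent may be
+# None for a top-level hostgroup):
+#
+#     hg_conf = {'classes': ['ntp'], 'params': {'password': 'toto'}}
+#     hg_id = foreman.hostgroups.checkAndCreate(
+#         'test', {'name': 'test'}, hg_conf, None, {'ntp': 42})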
diff --git a/opensteak/tools/opensteak/foreman_objects/hosts.py b/opensteak/tools/opensteak/foreman_objects/hosts.py
new file mode 100644
index 000000000..95d47af9d
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/hosts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.itemHost import ItemHost
+import time
+
+
+class Hosts(ForemanObjects):
+ """
+    Hosts class
+ """
+ objName = 'hosts'
+ payloadObj = 'host'
+
+ def list(self):
+ """ Function list
+ list the hosts
+
+ @return RETURN: List of ItemHost objects
+ """
+ return list(map(lambda x: ItemHost(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+ Get an host
+
+ @param key: The host name or ID
+ @return RETURN: The ItemHost object of an host
+ """
+ return ItemHost(self.api, key, self.api.get(self.objName, key))
+
+    def __printProgression__(self, status, msg, eol='\n', failed=None):
+ """ Function __printProgression__
+ Print the creation progression or not
+        It delegates to the opensteak.printer instance stored in
+        printHostProgress, when one is set
+
+        @param status: Status of the message
+        @param msg: Message
+        @param eol: End Of Line (to get a new line or not)
+        @param failed: failure details forwarded to the printer
+        @return RETURN: None
+ """
+        if self.printHostProgress:
+            self.printHostProgress.status(status, msg=msg, eol=eol,
+                                          failed=failed)
+
+ def createVM(self, key, attributes, printHostProgress=False):
+ """ Function createVM
+ Create a Virtual Machine
+
+        The creation of a VM with libvirt is a bit complex.
+        We first create the element in Foreman, then ask it to start
+        before the creation call has returned.
+ To do so, we make async calls to the API and check the results
+
+ @param key: The host name or ID
+        @param attributes: The payload of the host creation
+        @param printHostProgress: an opensteak.printer instance used to
+                                  print the host creation progression,
+                                  or False to stay silent
+ @return RETURN: The API result
+ """
+
+ self.printHostProgress = printHostProgress
+ self.async = True
+ # Create the VM in foreman
+ self.__printProgression__('In progress',
+ key + ' creation: push in Foreman', eol='\r')
+ future1 = self.api.create('hosts', attributes, async=True)
+
+ # Wait before asking to power on the VM
+ sleep = 5
+ for i in range(0, sleep):
+ time.sleep(1)
+ self.__printProgression__('In progress',
+ key + ' creation: start in {0}s'
+ .format(sleep - i),
+ eol='\r')
+
+ # Power on the VM
+ self.__printProgression__('In progress',
+ key + ' creation: starting', eol='\r')
+ future2 = self[key].powerOn()
+
+ # Show Power on result
+        if future2.result().status_code == 200:
+ self.__printProgression__('In progress',
+ key + ' creation: wait for end of boot',
+ eol='\r')
+ else:
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed=str(future2.result().status_code))
+ return False
+ # Show creation result
+        if future1.result().status_code == 200:
+ self.__printProgression__('In progress',
+ key + ' creation: created',
+ eol='\r')
+ else:
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed=str(future1.result().status_code))
+ return False
+
+ # Wait for puppet catalog to be applied
+ loop_stop = False
+ while not loop_stop:
+ status = self[key].getStatus()
+ if status == 'No Changes' or status == 'Active':
+ self.__printProgression__(True,
+ key + ' creation: provisioning OK')
+ loop_stop = True
+ elif status == 'Error':
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed="Error during provisioning")
+ loop_stop = True
+ return False
+ else:
+ self.__printProgression__('In progress',
+ key + ' creation: provisioning ({})'
+ .format(status),
+ eol='\r')
+ time.sleep(5)
+
+ return True
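+
+# Usage sketch (names, ids and the printer are illustrative):
+#
+#     from opensteak.printer import OpenSteakPrinter
+#     ok = foreman.hosts.createVM(
+#         'compute1.test-infra.opensteak.fr',
+#         {'host': {'name': 'compute1', 'hostgroup_id': 2}},
+#         printHostProgress=OpenSteakPrinter())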
diff --git a/opensteak/tools/opensteak/foreman_objects/item.py b/opensteak/tools/opensteak/foreman_objects/item.py
new file mode 100644
index 000000000..f418f8c11
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/item.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# David Blaisonneau <david.blaisonneau@orange.com>
+# Arnaud Morin <arnaud1.morin@orange.com>
+
+from pprint import pprint as pp
+
+
+class ForemanItem(dict):
+ """
+ Item class
+ Represent the content of a foreman object as a dict
+ """
+
+ def __init__(self, api, key,
+ objName, payloadObj,
+ *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ self.api = api
+ self.key = key
+ if objName:
+ self.objName = objName
+ if payloadObj:
+ self.payloadObj = payloadObj
+ self.store = dict()
+        if args and args[0]:
+ self.update(dict(*args, **kwargs))
+ # We get the smart class parameters for the good items
+ if objName in ['hosts', 'hostgroups',
+ 'puppet_classes', 'environments']:
+ from opensteak.foreman_objects.itemSmartClassParameter\
+ import ItemSmartClassParameter
+ scp_ids = map(lambda x: x['id'],
+ self.api.list('{}/{}/smart_class_parameters'
+ .format(self.objName, key)))
+ scp_items = list(map(lambda x: ItemSmartClassParameter(self.api, x,
+ self.api.get('smart_class_parameters', x)),
+ scp_ids))
+ scp = {'{}::{}'.format(x['puppetclass']['name'],
+ x['parameter']): x
+ for x in scp_items}
+ self.update({'smart_class_parameters_dict': scp})
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+ @param attribute: The data
+ @return RETURN: The API result
+ """
+        if key == 'puppetclass_ids':
+ payload = {"puppetclass_id": attributes,
+ self.payloadObj + "_class":
+ {"puppetclass_id": attributes}}
+ return self.api.create("{}/{}/{}"
+ .format(self.objName,
+ self.key,
+ "puppetclass_ids"),
+ payload)
+        elif key == 'parameters':
+ payload = {"parameter": attributes}
+ return self.api.create("{}/{}/{}"
+ .format(self.objName,
+ self.key,
+ "parameters"),
+ payload)
+ else:
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set(self.objName, self.key, payload)
+
+ def getParam(self, name=None):
+ """ Function getParam
+ Return a dict of parameters or a parameter value
+
+        @param name: The parameter name
+ @return RETURN: dict of parameters or a parameter value
+ """
+        if 'parameters' in self.keys():
+            params = {x['name']: x['value'] for x in self['parameters']}
+            if name:
+                if name in params.keys():
+                    return params[name]
+                else:
+                    return False
+            else:
+                return params
+
+ def checkAndCreateClasses(self, classes):
+ """ Function checkAndCreateClasses
+        Check and add puppet classes
+
+ @param classes: The classes ids list
+ @return RETURN: boolean
+ """
+ actual_classes = self['puppetclass_ids']
+ for v in classes:
+ if v not in actual_classes:
+ self['puppetclass_ids'] = v
+        return sorted(classes) == sorted(self['puppetclass_ids'])
+
+ def checkAndCreateParams(self, params):
+ """ Function checkAndCreateParams
+ Check and add global parameters
+
+ @param params: The params dict
+ @return RETURN: boolean
+ """
+ actual_params = self['param_ids']
+ for k, v in params.items():
+ if k not in actual_params:
+ self['parameters'] = {"name": k, "value": v}
+        return sorted(self['param_ids']) == sorted(params.keys())
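+
+# Sketch of the dict-style update path (ids and values are illustrative):
+#
+#     host = ForemanItem(api, 42, 'hosts', 'host', api.get('hosts', 42))
+#     host['comment'] = 'managed by opensteak'             # PUT hosts/42
+#     host['parameters'] = {'name': 'ntp', 'value': 'on'}  # POST hosts/42/parameters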
diff --git a/opensteak/tools/opensteak/foreman_objects/itemHost.py b/opensteak/tools/opensteak/foreman_objects/itemHost.py
new file mode 100644
index 000000000..c531e5cf4
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemHost.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import base64
+from string import Template
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemHost(ForemanItem):
+ """
+    ItemHost class
+    Represent the content of a foreman host as a dict
+ """
+
+ objName = 'hosts'
+ payloadObj = 'host'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'puppetclass_ids':
+ self.api.list('{}/{}/puppetclass_ids'
+ .format(self.objName, key))})
+ self.update({'param_ids':
+ list(self.api.list('{}/{}/parameters'
+ .format(self.objName, key),
+ only_id=True)
+ .keys())})
+
+ def getStatus(self):
+ """ Function getStatus
+ Get the status of an host
+
+ @return RETURN: The host status
+ """
+ return self.api.get('hosts', self.key, 'status')['status']
+
+ def powerOn(self):
+ """ Function powerOn
+ Power on a host
+
+ @return RETURN: The API result
+ """
+        # async unless disabled: Hosts.createVM expects a future back
+        return self.api.set('hosts', self.key,
+                            {"power_action": "start"},
+                            'power', async=getattr(self, 'async', True))
+
+ def getParamFromEnv(self, var, default=''):
+ """ Function getParamFromEnv
+ Search a parameter in the host environment
+
+ @param var: the var name
+ @param hostgroup: the hostgroup item linked to this host
+ @param default: default value
+ @return RETURN: the value
+ """
+ if self.getParam(var):
+ return self.getParam(var)
+ if self.hostgroup:
+ if self.hostgroup.getParam(var):
+ return self.hostgroup.getParam(var)
+        if self.domain.getParam(var):
+            return self.domain.getParam(var)
+        return default
+
+ def getUserData(self,
+ hostgroup,
+ domain,
+ defaultPwd='',
+ defaultSshKey='',
+ proxyHostname='',
+ tplFolder='templates_metadata/'):
+ """ Function getUserData
+ Generate a userdata script for metadata server from Foreman API
+
+ @param domain: the domain item linked to this host
+ @param hostgroup: the hostgroup item linked to this host
+ @param defaultPwd: the default password if no password is specified
+ in the host>hostgroup>domain params
+ @param defaultSshKey: the default ssh key if no password is specified
+ in the host>hostgroup>domain params
+ @param proxyHostname: hostname of the smartproxy
+ @param tplFolder: the templates folder
+ @return RETURN: the user data
+ """
+ if 'user-data' in self.keys():
+ return self['user-data']
+ else:
+ self.hostgroup = hostgroup
+ self.domain = domain
+ if proxyHostname == '':
+                proxyHostname = 'foreman.' + domain['name']
+ password = self.getParamFromEnv('password', defaultPwd)
+ sshauthkeys = self.getParamFromEnv('global_sshkey', defaultSshKey)
+            with open(tplFolder+'puppet.conf', 'r') as puppet_file:
+                p = MyTemplate(puppet_file.read())
+                enc_puppet_file = base64.b64encode(p.substitute(
+                    foremanHostname=proxyHostname).encode('utf-8'))
+ with open(tplFolder+'cloud-init.tpl', 'r') as content_file:
+ s = MyTemplate(content_file.read())
+ if sshauthkeys:
+ sshauthkeys = ' - '+sshauthkeys
+ self.userdata = s.substitute(
+ password=password,
+ fqdn=self['name'],
+ sshauthkeys=sshauthkeys,
+ foremanurlbuilt="http://{}/unattended/built"
+ .format(proxyHostname),
+ puppet_conf_content=enc_puppet_file.decode('utf-8'))
+ return self.userdata
+
+
+class MyTemplate(Template):
+ delimiter = '%'
+ idpattern = r'[a-z][_a-z0-9]*'
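+
+# MyTemplate switches string.Template's delimiter to '%', so the cloud-init
+# template can use placeholders like %password, %fqdn and %sshauthkeys.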
diff --git a/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py
new file mode 100644
index 000000000..d6a641c03
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemHostsGroup(ForemanItem):
+ """
+ ItemHostsGroup class
+ Represent the content of a foreman hostgroup as a dict
+ """
+
+ objName = 'hostgroups'
+ payloadObj = 'hostgroup'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'puppetclass_ids':
+ self.api.list('{}/{}/puppetclass_ids'
+ .format(self.objName, key))})
+ self.update({'param_ids':
+ list(self.api.list('{}/{}/parameters'
+ .format(self.objName, key),
+ only_id=True)
+ .keys())})
diff --git a/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py
new file mode 100644
index 000000000..936185e98
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+
+from opensteak.foreman_objects.item import ForemanItem
+from pprint import pprint as pp
+
+class ItemOverrideValues(ForemanItem):
+ """
+ ItemOverrideValues class
+    Represent the content of a foreman override value as a dict
+ """
+
+ objName = 'override_values'
+ payloadObj = 'override_value'
+
+ def __init__(self, api, key, parentName, parentKey, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param parentName: The object parent name (eg: smart_class_parameter)
+ @param parentKey: The object parent key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ self.parentName = parentName
+ self.parentKey = parentKey
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+ @param attribute: The data
+ @return RETURN: The API result
+ """
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set('{}/{}/{}'.format(self.parentName,
+ self.parentKey,
+ self.objName),
+ self.key, payload)
diff --git a/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py
new file mode 100644
index 000000000..2d7ca2ab9
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+
+from opensteak.foreman_objects.item import ForemanItem
+from opensteak.foreman_objects.itemOverrideValues import ItemOverrideValues
+
+
+class ItemSmartClassParameter(ForemanItem):
+ """
+ ItemSmartClassParameter class
+ Represent the content of a foreman smart class parameter as a dict
+ """
+
+ objName = 'smart_class_parameters'
+ payloadObj = 'smart_class_parameter'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'override_values':
+ list(map(lambda x: ItemOverrideValues(self.api,
+ x['id'],
+ self.objName,
+ key,
+ x),
+ self['override_values']))})
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+        @param attributes: The data to set
+ @return RETURN: The API result
+ """
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set(self.objName, self.key, payload)
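+
+# Usage sketch (illustrative only; 'api' is an authenticated client and
+# the parameter id is hypothetical):
+#
+#   scp = ItemSmartClassParameter(api, 12,
+#                                 api.get('smart_class_parameters', 12))
+#   scp['override'] = True  # enable overriding for this parameter
+#   for ov in scp['override_values']:
+#       ov['value'] = 'eth1'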
diff --git a/opensteak/tools/opensteak/foreman_objects/objects.py b/opensteak/tools/opensteak/foreman_objects/objects.py
new file mode 100644
index 000000000..c20c5a138
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/objects.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ForemanObjects:
+ """
+ ForemanObjects class
+ Parent class for Foreman Objects
+ """
+
+ def __init__(self, api, objName=None, payloadObj=None):
+ """ Function __init__
+ Init the foreman object
+
+ @param api: The foreman API
+ @param objName: The object name (linked with the Foreman API)
+ @param payloadObj: The object name inside the payload (in general
+ the singular of objName)
+ @return RETURN: Itself
+ """
+
+ self.api = api
+ if objName:
+ self.objName = objName
+ if payloadObj:
+ self.payloadObj = payloadObj
+ # For asynchronous creations
+ self.async = False
+
+ def __iter__(self):
+ """ Function __iter__
+
+ @return RETURN: The iteration of objects list
+ """
+ return iter(self.list())
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+
+ @param key: The targeted object
+ @return RETURN: A ForemanItem
+ """
+ return ForemanItem(self.api,
+ key,
+ self.objName,
+ self.payloadObj,
+ self.api.get(self.objName, key))
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+
+ @param key: The targeted object
+ @param attributes: The attributes to apply to the object
+ @return RETURN: API result if the object was not present, or False
+ """
+ if key not in self:
+ payload = {self.payloadObj: {'name': key}}
+ payload[self.payloadObj].update(attributes)
+ return self.api.create(self.objName, payload, async=self.async)
+ return False
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+
+ @return RETURN: API result
+ """
+ return self.api.delete(self.objName, key)
+
+ def __contains__(self, key):
+ """ Function __contains__
+
+ @param key: The targeted object
+ @return RETURN: True if the object exists
+ """
+        return key in self.listName()
+
+ def getId(self, key):
+ """ Function getId
+ Get the id of an object
+
+ @param key: The targeted object
+ @return RETURN: The ID
+ """
+ return self.api.get_id_by_name(self.objName, key)
+
+ def list(self, limit=20):
+ """ Function list
+ Get the list of all objects
+
+        @param limit: The limit of items to return
+ @return RETURN: A ForemanItem list
+ """
+ return list(map(lambda x:
+ ForemanItem(self.api, x['id'],
+ self.objName, self.payloadObj,
+ x),
+ self.api.list(self.objName, limit=limit)))
+
+ def listName(self):
+ """ Function listName
+ Get the list of all objects name with Ids
+
+        @return RETURN: A dict of object name:id
+ """
+ return self.api.list(self.objName, limit=999999, only_id=True)
+
+ def checkAndCreate(self, key, payload):
+ """ Function checkAndCreate
+ Check if an object exists and create it if not
+
+ @param key: The targeted object
+ @param payload: The targeted object description
+ @return RETURN: The id of the object
+ """
+ if key not in self:
+ self[key] = payload
+ return self[key]['id']
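+
+# Usage sketch of the mapping-style protocol above (illustrative only;
+# 'api' is an authenticated client, names and payloads are hypothetical):
+#
+#   domains = ForemanObjects(api, objName='domains', payloadObj='domain')
+#   if 'lab.example.org' not in domains:        # __contains__
+#       domains['lab.example.org'] = {}         # __setitem__ -> create
+#   oid = domains.checkAndCreate('lab.example.org', {})
+#   del domains['lab.example.org']              # __delitem__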
diff --git a/opensteak/tools/opensteak/foreman_objects/operatingsystems.py b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py
new file mode 100644
index 000000000..8cce606e6
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py
@@ -0,0 +1,66 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class OperatingSystems(ForemanObjects):
+ """
+ OperatingSystems class
+ """
+ objName = 'operatingsystems'
+ payloadObj = 'operatingsystem'
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+
+ @param key: The operating system id/name
+ @return RETURN: The item
+ """
+ ret = self.api.list(self.objName,
+ filter='title = "{}"'.format(key))
+ if len(ret):
+ return ForemanItem(self.api, key,
+ self.objName, self.payloadObj,
+ ret[0])
+ else:
+ return None
+
+ def __setitem__(self, key, attributes):
+ """ Function __getitem__
+
+ @param key: The operating system id/name
+ @param attributes: The content of the operating system to create
+ @return RETURN: The API result
+ """
+ if key not in self:
+ payload = {self.payloadObj: {'title': key}}
+ payload[self.payloadObj].update(attributes)
+ return self.api.create(self.objName, payload)
+ return False
+
+ def listName(self):
+ """ Function listName
+ Get the list of all objects name with Ids
+
+        @return RETURN: A dict of object name:id
+ """
+        return {x['title']: x['id'] for x in self.api.list(self.objName,
+                                                           limit=999999)}
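+
+# Usage sketch (illustrative only): operating systems are addressed by
+# their 'title', e.g. "Ubuntu 14.04.2 LTS", instead of a plain name
+#
+#   oses = OperatingSystems(api)
+#   if oses['Ubuntu 14.04.2 LTS'] is None:
+#       oses['Ubuntu 14.04.2 LTS'] = {'major': '14', 'minor': '04'}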
diff --git a/opensteak/tools/opensteak/foreman_objects/puppetClasses.py b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py
new file mode 100644
index 000000000..7f397f27a
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class PuppetClasses(ForemanObjects):
+ """
+    PuppetClasses class
+ """
+ objName = 'puppetclasses'
+ payloadObj = 'puppetclass'
+
+ def list(self, limit=20):
+ """ Function list
+ Get the list of all objects
+
+        @param limit: The limit of items to return
+ @return RETURN: A ForemanItem list
+ """
+ puppetClassList = list()
+ for v in self.api.list(self.objName, limit=limit).values():
+ puppetClassList.extend(v)
+ return list(map(lambda x:
+ ForemanItem(self.api, x['id'],
+ self.objName, self.payloadObj,
+ x),
+ puppetClassList))
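+
+# Usage sketch (illustrative only): the API returns puppet classes
+# grouped by module name, hence the flattening above
+#
+#   for klass in PuppetClasses(api).list(limit=100):
+#       print(klass['id'], klass['name'])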
diff --git a/opensteak/tools/opensteak/foreman_objects/smart_proxies.py b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py
new file mode 100644
index 000000000..2d6518b48
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class SmartProxies(ForemanObjects):
+ """
+    SmartProxies class
+ """
+ objName = 'smart_proxies'
+ payloadObj = 'smart_proxy'
+
+ def importPuppetClasses(self, smartProxyId):
+ """ Function importPuppetClasses
+ Force the reload of puppet classes
+
+ @param smartProxyId: smartProxy Id
+ @return RETURN: the API result
+ """
+ return self.api.create('smart_proxies/{}/import_puppetclasses'.format(smartProxyId), '{}')
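+
+# Usage sketch (illustrative only; the proxy name is hypothetical):
+#
+#   proxies = SmartProxies(api)
+#   proxies.importPuppetClasses(proxies.getId('foreman.example.org'))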
diff --git a/opensteak/tools/opensteak/foreman_objects/subnets.py b/opensteak/tools/opensteak/foreman_objects/subnets.py
new file mode 100644
index 000000000..b1cac5445
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/subnets.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Subnets(ForemanObjects):
+ """
+ Subnets class
+ """
+ objName = 'subnets'
+ payloadObj = 'subnet'
+
+ def checkAndCreate(self, key, payload, domainId):
+ """ Function checkAndCreate
+ Check if a subnet exists and create it if not
+
+ @param key: The targeted subnet
+ @param payload: The targeted subnet description
+        @param domainId: The domainId to be attached to the subnet
+ @return RETURN: The id of the subnet
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+        # Ensure the subnet contains the domain
+        subnetDomainIds = []
+        for domain in self[key]['domains']:
+            subnetDomainIds.append(domain['id'])
+        if domainId not in subnetDomainIds:
+            subnetDomainIds.append(domainId)
+            self[key]["domain_ids"] = subnetDomainIds
+            # Compare lengths with '!=' ('is not' tests identity, not value)
+            if len(self[key]["domains"]) != len(subnetDomainIds):
+                return False
+ return oid
+
+ def removeDomain(self, subnetId, domainId):
+ """ Function removeDomain
+ Delete a domain from a subnet
+
+ @param subnetId: The subnet Id
+        @param domainId: The domainId to be removed from the subnet
+ @return RETURN: boolean
+ """
+ subnetDomainIds = []
+ for domain in self[subnetId]['domains']:
+ subnetDomainIds.append(domain['id'])
+ subnetDomainIds.remove(domainId)
+ self[subnetId]["domain_ids"] = subnetDomainIds
+        return len(self[subnetId]["domains"]) == len(subnetDomainIds)
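+
+# Usage sketch (illustrative only; ids and payload are hypothetical):
+#
+#   subnets = Subnets(api)
+#   sid = subnets.checkAndCreate('Admin',
+#                                {'network': '192.168.1.0',
+#                                 'mask': '255.255.255.0'},
+#                                domainId=3)
+#   subnets.removeDomain(sid, 3)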
diff --git a/opensteak/tools/opensteak/printer.py b/opensteak/tools/opensteak/printer.py
new file mode 100644
index 000000000..98c5af54e
--- /dev/null
+++ b/opensteak/tools/opensteak/printer.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import sys
+
+
+class OpenSteakPrinter:
+ """ Just a nice message printer """
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+
+ TABSIZE = 4
+
+ def header(self, msg):
+ """ Function header
+ Print a header for a block
+
+ @param msg: The message to print in the header (limited to 78 chars)
+ @return RETURN: None
+ """
+ print("""
+#
+# {}
+#
+""".format(msg[0:78]))
+
+ def config(self, msg, name, value=None, indent=0):
+ """ Function config
+ Print a line with the value of a parameter
+
+        @param msg: The message to print
+        @param name: The name of the parameter
+ @param value: The value of the parameter
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ ind = ' ' * indent * self.TABSIZE
+ if value is None:
+ print('{} - {} = {}'.format(ind, msg, name))
+ elif value is False:
+ print('{} [{}KO{}] {} > {} (NOT found)'.
+ format(ind, self.FAIL, self.ENDC, msg, name))
+ else:
+ print('{} [{}OK{}] {} > {} = {}'.
+ format(ind, self.OKGREEN, self.ENDC, msg, name, str(value)))
+
+ def list(self, msg, indent=0):
+ """ Function list
+ Print a list item
+
+        @param msg: The message to print as a list item
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ print(' ' * indent * self.TABSIZE, '-', msg)
+
+ def list_id(self, dic, indent=0):
+ """ Function list_id
+ Print a list of dict items
+
+ @param dic: The dict to print
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ for (k, v) in dic.items():
+ self.list("{}: {}".format(k, v), indent=indent)
+
+ def status(self, res, msg, failed="", eol="\n", quit=True, indent=0):
+ """ Function status
+ Print status message
+ - OK/KO if the result is a boolean
+ - Else the result text
+
+ @param res: The status to show
+        @param msg: The message to show
+        @param failed: Failure details to append on a new line
+        @param eol: End of line
+ @param quit: Exit the system in case of failure
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ ind = ' ' * indent * self.TABSIZE
+ if res is True:
+ msg = '{} [{}OK{}] {}'.format(ind, self.OKGREEN, self.ENDC, msg)
+ elif res:
+ msg = '{} [{}{}{}] {}'.format(ind, self.OKBLUE, res,
+ self.ENDC, msg)
+ else:
+ msg = '{} [{}KO{}] {}'.format(ind, self.FAIL, self.ENDC, msg)
+ if failed:
+ msg += '\n > {}'.format(failed)
+ msg = msg.ljust(140) + eol
+ sys.stdout.write(msg)
+ if res is False and quit is True:
+            sys.exit(1)  # non-zero exit code on failure
+
+ def ask_validation(self, prompt=None, resp=False):
+ """ Function ask_validation
+ Ask a validation message
+
+        @param prompt: The question to ask (defaults to 'Continue ?')
+        @param resp: The default answer (default is False)
+        @return RETURN: True if the user confirms, else the program exits
+ """
+ if prompt is None:
+ prompt = 'Continue ?'
+ if resp:
+ prompt += ' [{}Y{}/n]: '.format(self.BOLD, self.ENDC)
+ else:
+ prompt += ' [y/{}N{}]: '.format(self.BOLD, self.ENDC)
+ while True:
+ ans = input(prompt)
+ if not ans:
+ ans = 'y' if resp else 'n'
+ if ans not in ['y', 'Y', 'n', 'N']:
+                print('Please enter y or n.')
+ continue
+ if ans == 'y' or ans == 'Y':
+ return True
+ if ans == 'n' or ans == 'N':
+ sys.exit(0)
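+
+# Usage sketch (illustrative only):
+#
+#   p = OpenSteakPrinter()
+#   p.header('Deploy the foreman VM')
+#   p.config('Parameter', 'domain', 'example.org', indent=1)
+#   p.status(True, 'DNS is reachable')
+#   if p.ask_validation('Create the VM ?', resp=True):
+#       p.status(True, 'VM created')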
diff --git a/opensteak/tools/opensteak/templateparser.py b/opensteak/tools/opensteak/templateparser.py
new file mode 100644
index 000000000..720f008da
--- /dev/null
+++ b/opensteak/tools/opensteak/templateparser.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Template parser
+"""
+
+from string import Template
+
+class OpenSteakTemplateParser:
+
+    def __init__(self, filein, fileout, dictionary):
+        """
+        Render the template 'filein' into 'fileout' with the dictionary
+        """
+        # Context managers guarantee both files are closed (and the
+        # output flushed) even if the substitution raises
+        with open(filein) as fin, open(fileout, 'w') as fout:
+            template = Template(fin.read())
+            fout.write(template.substitute(dictionary))
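+
+# Usage sketch (illustrative only; paths and keys are hypothetical):
+# string.Template substitutes ${name}-style markers and collapses every
+# escaped '$$' into a literal '$' in the rendered file, which is why the
+# shell templates below write '$$VAR' for runtime shell variables
+#
+#   OpenSteakTemplateParser('templates_foreman/install.sh',
+#                           '/tmp/install.sh',
+#                           {'name': 'foreman', 'domain': 'example.org',
+#                            ...})  # one entry per ${marker} in the template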
diff --git a/opensteak/tools/opensteak/virsh.py b/opensteak/tools/opensteak/virsh.py
new file mode 100644
index 000000000..594b84299
--- /dev/null
+++ b/opensteak/tools/opensteak/virsh.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Virsh library
+"""
+
+import subprocess
+import os
+
+class OpenSteakVirsh:
+
+ virsh = "/usr/bin/virsh"
+ genisoimage = "/usr/bin/genisoimage"
+ environment = ""
+
+ ###
+ # INIT
+ ###
+ def __init__(self):
+ self.environment = dict(os.environ) # Copy current environment
+ self.environment['LANG'] = 'en_US.UTF-8'
+
+
+ ###
+ # VOLUMES
+ ###
+ def volumeList(self, pool="default"):
+ """
+ Return all volumes from a pool
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-list", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ # Split lines
+ lines = stdout.splitlines()
+
+        # For each line, split on whitespace and build a dictionary
+ newLines = {}
+ for line in lines:
+ name, path = line.split(maxsplit=1)
+ newLines[name.strip()] = path.strip()
+
+ return newLines
+
+ def volumeDelete(self, path):
+ """
+ Delete a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-delete", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def volumeClone(self, origin, name, pool="default"):
+ """
+ Clone a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-clone", "--pool", pool, origin, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def volumeResize(self, name, size, pool="default"):
+ """
+ Resize a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-resize", "--pool", pool, name, size], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # POOLS
+ ###
+ def poolRefresh(self, pool="default"):
+ """
+ Refresh a pool
+ """
+ p = subprocess.Popen([self.virsh, "-q", "pool-refresh", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # DOMAINS
+ ###
+ def domainList(self):
+ """
+ Return all domains (VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "list", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ # Split lines
+ lines = stdout.splitlines()
+
+        # For each line, split on whitespace and build a dictionary
+ newLines = {}
+ for line in lines:
+            _id, name, status = line.split(maxsplit=2)  # avoid shadowing id()
+ newLines[name.strip()] = status.strip()
+
+ return newLines
+
+ def domainDefine(self, xml):
+ """
+ Define a domain (create a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "define", xml], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainUndefine(self, name):
+ """
+ Undefine a domain (delete a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "undefine", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainStart(self, name):
+ """
+        Start a domain (boot a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "start", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainDestroy(self, name):
+ """
+ Destroy a domain (stop a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "destroy", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # ISO
+ ###
+ def generateConfiguration(self, name, files):
+ """
+ Generate an ISO file
+ """
+
+ commandArray = [self.genisoimage, "-quiet", "-o", "/var/lib/libvirt/images/{0}-configuration.iso".format(name), "-volid", "cidata", "-joliet", "-rock"]
+        commandArray.extend(files.values())
+
+ # Generate the iso file
+ p = subprocess.Popen(commandArray, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
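+# Usage sketch (illustrative only; image and file names are hypothetical):
+#
+#   v = OpenSteakVirsh()
+#   v.volumeClone('trusty.qcow2', 'foreman')
+#   v.volumeResize('foreman', '80G')
+#   v.generateConfiguration('foreman', {'user-data': '/tmp/user-data',
+#                                       'meta-data': '/tmp/meta-data'})
+#   v.poolRefresh()
+#   v.domainDefine('/tmp/foreman.xml')
+#   v.domainStart('foreman')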
diff --git a/opensteak/tools/templates_foreman/install.sh b/opensteak/tools/templates_foreman/install.sh
new file mode 100644
index 000000000..497be86e7
--- /dev/null
+++ b/opensteak/tools/templates_foreman/install.sh
@@ -0,0 +1,216 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+### Set vars
+NAME="${name}"
+DOMAIN="${domain}"
+DATEE=$$(date +%F-%Hh%M)
+IP="${ip}"
+MASK="${netmaskshort}"
+NET="${network}"
+DHCP_RANGE="${dhcprange}"
+REVERSE_DNS="${reversedns}"
+DNS_FORWARDER="${dns}"
+ADMIN="${admin}"
+PASSWORD="${password}"
+
+### Set correct env
+#dpkg-reconfigure locales
+export LC_CTYPE=en_US.UTF-8
+export LANG=en_US.UTF-8
+unset LC_ALL
+umask 0022
+
+### Check hostname is on the public interface
+echo "* Ensure hostname point to external IP"
+# Remove useless lines
+perl -i -pe 's/^127.0.1.1.*\n$$//' /etc/hosts
+perl -i -pe "s/^$${IP}.*\n$$//" /etc/hosts
+# Append a line
+echo "$${IP} $${NAME}.$${DOMAIN} $${NAME}" >> /etc/hosts
+
+### Dependencies
+echo "* Install dependencies"
+apt-get -y install ca-certificates wget git isc-dhcp-server
+
+### Set AppArmor
+echo "* Set App armor"
+if ! grep -q '/etc/bind/rndc.key r,' /etc/apparmor.d/local/usr.sbin.dhcpd ; then
+ echo "/etc/bind/rndc.key r," >> /etc/apparmor.d/local/usr.sbin.dhcpd
+fi
+
+### Prepare repos
+echo "* Enable Puppet labs repo"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetlabs-release')" ] ; then
+ wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb
+ dpkg -i puppetlabs-release-trusty.deb
+ apt-get update
+fi
+
+# Install puppetmaster
+echo "* Install puppetmaster"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetmaster')" ] ; then
+ apt-get -y install puppetmaster
+fi
+
+# Enable the Foreman repo
+echo "* Enable Foreman repo"
+if [ ! -e /etc/apt/sources.list.d/foreman.list ] ; then
+ echo "deb http://deb.theforeman.org/ trusty 1.8" > /etc/apt/sources.list.d/foreman.list
+ echo "deb http://deb.theforeman.org/ plugins 1.8" >> /etc/apt/sources.list.d/foreman.list
+ wget -q http://deb.theforeman.org/pubkey.gpg -O- | apt-key add -
+ apt-get update
+fi
+
+### Install Foreman
+echo "* Install foreman-installer"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii foreman-installer')" ] ; then
+ apt-get -y install foreman-installer
+fi
+if [ "Z" = "Z$$(gem list --local |grep rubyipmi)" ] ; then
+ gem install -q rubyipmi
+fi
+
+### Execute foreman installer
+echo "* Execute foreman installer"
+
+foreman-installer \
+ --foreman-admin-username="$$ADMIN" \
+ --foreman-admin-password="$$PASSWORD" \
+ --enable-foreman-plugin-templates \
+ --enable-foreman-plugin-discovery \
+ --foreman-plugin-discovery-install-images=true \
+ --enable-foreman-compute-libvirt
+
+
+foreman-installer \
+ --foreman-admin-username="$$ADMIN" \
+ --foreman-admin-password="$$PASSWORD" \
+ --enable-foreman-plugin-templates \
+ --enable-foreman-plugin-discovery \
+ --foreman-plugin-discovery-install-images=true \
+ --enable-foreman-compute-libvirt \
+ --enable-foreman-proxy \
+ --foreman-proxy-bmc=true \
+ --foreman-proxy-tftp=true \
+ --foreman-proxy-tftp-servername="$$IP" \
+ --foreman-proxy-dhcp=true \
+ --foreman-proxy-dhcp-interface="eth0" \
+ --foreman-proxy-dhcp-gateway="$$IP" \
+ --foreman-proxy-dhcp-range="$$DHCP_RANGE" \
+ --foreman-proxy-dhcp-nameservers="$$IP" \
+ --foreman-proxy-dns=true \
+ --foreman-proxy-dns-interface="eth0" \
+ --foreman-proxy-dns-zone="$$DOMAIN" \
+ --foreman-proxy-dns-reverse="$$REVERSE_DNS" \
+ --foreman-proxy-dns-forwarders="$$DNS_FORWARDER" \
+ --foreman-proxy-foreman-base-url="https://localhost"
+
+### Sync community templates for the latest Ubuntu versions
+
+echo "* Sync community templates for the latest Ubuntu versions"
+foreman-rake templates:sync
+
+### Get and install OpenSteak files
+
+echo "* Get OpenSteak repos"
+if [ -d /usr/local/opensteak ] ; then
+ cd /usr/local/opensteak
+ git pull
+else
+ cd /usr/local/
+ git clone https://github.com/Orange-OpenSource/opnfv.git -b foreman opensteak
+fi
+cd /usr/local/opensteak/infra/puppet_master
+
+echo "* Set puppet auth"
+echo "*.$$DOMAIN" > /etc/puppet/autosign.conf
+if [ -e /etc/puppet/auth.conf ] ; then
+ # Make a backup
+ mv /etc/puppet/auth.conf /etc/puppet/auth.conf.$$DATEE
+fi
+cp etc/puppet/auth.conf /etc/puppet/auth.conf
+perl -i -pe "s/__NET__/$$NET/" /etc/puppet/auth.conf
+perl -i -pe "s/__MASK__/$$MASK/" /etc/puppet/auth.conf
+
+# Set Hiera Conf
+echo "* Push Hiera conf into /etc/puppet/"
+if [ -e /etc/puppet/hiera.yaml ] ; then
+ # Make a backup
+ mv /etc/puppet/hiera.yaml /etc/puppet/hiera.yaml.$$DATEE
+fi
+cp etc/puppet/hiera.yaml /etc/puppet/hiera.yaml
+if [ -e /etc/hiera.yaml ] ; then
+ rm /etc/hiera.yaml
+fi
+ln -s /etc/puppet/hiera.yaml /etc/hiera.yaml
+cp -rf etc/puppet/hieradata /etc/puppet/
+rename s/DOMAIN/$$DOMAIN/ /etc/puppet/hieradata/production/nodes/*.yaml
+cp etc/puppet/manifests/site.pp /etc/puppet/manifests/site.pp
+cp ../config/common.yaml /etc/puppet/hieradata/production/common.yaml
+chgrp puppet /etc/puppet/hieradata/production/*.yaml
+
+# Install and config r10k
+echo "* Install and setup r10k"
+if [ "Z" = "Z$$(gem list --local |grep r10k)" ] ; then
+ gem install -q r10k
+fi
+if [ -e /etc/r10k.yaml ] ; then
+ # Make a backup
+ mv /etc/r10k.yaml /etc/r10k.yaml.$$DATEE
+fi
+cp etc/r10k.yaml /etc/r10k.yaml
+
+# Install opensteak-r10k-update script
+echo "* Install opensteak-r10k-update script into /usr/local/bin"
+cp usr/local/bin/opensteak-r10k-update /usr/local/bin/opensteak-r10k-update
+chmod +x /usr/local/bin/opensteak-r10k-update
+
+echo "* Run R10k. You can re-run r10k by calling:"
+echo " opensteak-r10k-update"
+opensteak-r10k-update
+
+### Install VIM puppet
+echo "* Install VIM puppet"
+if [ ! -d ~/.vim/autoload ] ; then
+ mkdir -p ~/.vim/autoload
+fi
+if [ ! -d ~/.vim/bundle ] ; then
+ mkdir -p ~/.vim/bundle
+fi
+curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
+cat <<EOF > ~/.vimrc
+execute pathogen#infect()
+syntax on
+filetype plugin indent on
+EOF
+cd ~/.vim/bundle
+if [ ! -d vim-puppet ] ; then
+ git clone https://github.com/rodjek/vim-puppet.git > /dev/null
+fi
+
+### Gen SSH key for foreman
+echo "* SSH Key"
+cp /mnt/id_rsa /usr/share/foreman/.ssh/
+cp /mnt/id_rsa.pub /usr/share/foreman/.ssh/
+chown foreman:foreman /usr/share/foreman/.ssh/ -R
+
+### Run puppet
+puppet agent -t -v
+
diff --git a/opensteak/tools/templates_foreman/kvm-config b/opensteak/tools/templates_foreman/kvm-config
new file mode 100644
index 000000000..7e3d65dc8
--- /dev/null
+++ b/opensteak/tools/templates_foreman/kvm-config
@@ -0,0 +1,65 @@
+<domain type='kvm'>
+ <name>${name}</name>
+ <memory>${ram}</memory>
+ <currentMemory>${ram}</currentMemory>
+ <vcpu>${cpu}</vcpu>
+ <os>
+ <type arch='x86_64'>hvm</type>
+ <!-- uncomment to enable PXE boot
+ <boot dev='network'/>
+ -->
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/><apic/><pae/>
+ </features>
+ <clock offset="utc"/>
+ <on_poweroff>preserve</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/${name}'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/var/lib/libvirt/images/${name}-configuration.iso'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <input type='mouse' bus='ps2'/>
+ <!-- uncomment to allow virsh console
+ <console type='pty'/>
+ <!- - end -->
+ <!-- uncomment to allow console to a log file -->
+ <serial type='file'>
+ <source path='/var/log/libvirt/qemu/${name}-serial.log'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <serial type='pty'>
+ <source path='/dev/pts/1'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <console type='file'>
+ <source path='/var/log/libvirt/qemu/${name}-serial.log'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <!-- end -->
+ <graphics type='spice' port='-1' autoport='no'/>
+ <video>
+ <model type='qxl' ram='65536' vram='65536' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </video>
+ <memballoon model='virtio'/>
+ <interface type='bridge'>
+ <source bridge='${bridge}'/>
+ ${bridgeconfig}
+ <model type='virtio'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/opensteak/tools/templates_foreman/meta-data b/opensteak/tools/templates_foreman/meta-data
new file mode 100644
index 000000000..b4cb9b6ab
--- /dev/null
+++ b/opensteak/tools/templates_foreman/meta-data
@@ -0,0 +1,12 @@
+instance-id: ${name};
+network-interfaces: |
+ auto lo
+ iface lo inet loopback
+ auto eth0
+ iface eth0 inet static
+ address ${ip}
+ netmask ${netmaskshort}
+ gateway ${gateway}
+ dns-nameservers ${dns}
+ dns-search ${domain}
+local-hostname: ${name}
diff --git a/opensteak/tools/templates_foreman/user-data b/opensteak/tools/templates_foreman/user-data
new file mode 100644
index 000000000..281b5d46d
--- /dev/null
+++ b/opensteak/tools/templates_foreman/user-data
@@ -0,0 +1,25 @@
+#cloud-config
+#############################################
+# OPENSTEAK VM '${name}'
+#############################################
+password: ${password}
+chpasswd: { expire: False }
+ssh_pwauth: True
+dsmode: net
+hostname: ${name}
+#############################################
+# FIRST BOOT COMMAND
+# - reload main interface
+# - install puppet from puppetlabs
+# - remove cloud-init
+#############################################
+runcmd:
+ - [ sh, -c, "mount /dev/vdb /mnt"]
+ - [ sh, -c, "sudo bash /mnt/install.sh"]
+# This is the id_rsa.sansmotdepasse key
+ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGxilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3rh+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBht+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D sansmotdepasse
+#############################################
+# FINAL MESSAGE AT END OF BOOT
+#############################################
+final_message: "The system '${name}' is finally up, after $$UPTIME seconds"