author    Tim Rozet <trozet@redhat.com>  2015-09-08 14:57:39 +0000
committer Gerrit Code Review <gerrit@172.30.200.206>  2015-09-08 14:57:39 +0000
commit    e65ed372ee84b1f634ec24002fb7bfd7d48fd7e7 (patch)
tree      b56c3f4dae40c8efab7ddf177c7ae9475d7cf16e
parent    7f8c963b4dbcc1e57e97e2637ad5a639594319fa (diff)
parent    8f49d7a5efc0e847ecffcb05f6aff2186d8cecc9 (diff)
Merge "Merge branch 'master' into merge-master-arno" into stable/arno
-rw-r--r--  common/docs/user-guide.rst  19
-rw-r--r--  common/puppet-opnfv/manifests/compute.pp  43
-rw-r--r--  common/puppet-opnfv/manifests/controller_networker.pp  92
-rw-r--r--  common/puppet-opnfv/manifests/external_net_presetup.pp  2
-rw-r--r--  common/puppet-opnfv/manifests/external_net_setup.pp  5
-rw-r--r--  common/puppet-opnfv/manifests/init.pp  1
-rw-r--r--  common/puppet-opnfv/manifests/odl_docker.pp  50
-rw-r--r--  common/puppet-opnfv/manifests/templates/br_ex.erb  10
-rw-r--r--  common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile  82
-rw-r--r--  common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh  18
-rw-r--r--  common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh  20
-rw-r--r--  common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh  48
-rwxr-xr-x  compass/ci/build.sh  26
-rwxr-xr-x  compass/ci/deploy.sh  14
-rwxr-xr-x  compass/ci/launch.sh  65
-rwxr-xr-x  compass/ci/log.sh  22
-rw-r--r--  compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml  42
-rw-r--r--  compass/deploy/ansible/openstack_juno/allinone.yml  38
-rw-r--r--  compass/deploy/ansible/openstack_juno/compute.yml  9
-rw-r--r--  compass/deploy/ansible/openstack_juno/controller.yml  15
-rw-r--r--  compass/deploy/ansible/openstack_juno/group_vars/all  54
-rw-r--r--  compass/deploy/ansible/openstack_juno/multinodes.yml  75
-rw-r--r--  compass/deploy/ansible/openstack_juno/network.yml  8
-rw-r--r--  compass/deploy/ansible/openstack_juno/single-controller.yml  38
-rw-r--r--  compass/deploy/ansible/openstack_juno/storage.yml  8
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/handlers/main.yml  6
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml  20
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml  20
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/tasks/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini  71
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf  63
-rw-r--r--  compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh  6
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/files/loop.yml  1
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/handlers/main.yml  3
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/tasks/main.yml  55
-rw-r--r--  compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf  62
-rw-r--r--  compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list  1
-rw-r--r--  compass/deploy/ansible/roles/common/tasks/main.yml  28
-rw-r--r--  compass/deploy/ansible/roles/common/templates/hosts  22
-rw-r--r--  compass/deploy/ansible/roles/common/templates/ntp.conf  56
-rw-r--r--  compass/deploy/ansible/roles/dashboard/tasks/main.yml  30
-rw-r--r--  compass/deploy/ansible/roles/dashboard/templates/local_settings.py  511
-rw-r--r--  compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf  14
-rw-r--r--  compass/deploy/ansible/roles/database/files/my.cnf  131
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/main.yml  12
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/mariadb.yml  46
-rw-r--r--  compass/deploy/ansible/roles/database/tasks/mysql.yml  22
-rw-r--r--  compass/deploy/ansible/roles/database/templates/data.j2  39
-rw-r--r--  compass/deploy/ansible/roles/database/templates/my.cnf  134
-rw-r--r--  compass/deploy/ansible/roles/database/templates/wsrep.cnf  126
-rw-r--r--  compass/deploy/ansible/roles/glance/handlers/main.yml  6
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/glance_config.yml  29
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/glance_install.yml  26
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/main.yml  18
-rw-r--r--  compass/deploy/ansible/roles/glance/tasks/nfs.yml  41
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/glance-api.conf  677
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/glance-registry.conf  190
-rw-r--r--  compass/deploy/ansible/roles/glance/templates/image_upload.sh  2
-rw-r--r--  compass/deploy/ansible/roles/ha/files/galera_chk  10
-rw-r--r--  compass/deploy/ansible/roles/ha/files/mysqlchk  15
-rw-r--r--  compass/deploy/ansible/roles/ha/files/notify.sh  4
-rw-r--r--  compass/deploy/ansible/roles/ha/handlers/main.yml  9
-rw-r--r--  compass/deploy/ansible/roles/ha/tasks/main.yml  94
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/failover.j2  65
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/haproxy.cfg  133
-rw-r--r--  compass/deploy/ansible/roles/ha/templates/keepalived.conf  42
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml  16
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml  29
-rw-r--r--  compass/deploy/ansible/roles/keystone/tasks/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh  6
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh  5
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/keystone.conf  1317
-rw-r--r--  compass/deploy/ansible/roles/keystone/templates/keystone_init  43
-rw-r--r--  compass/deploy/ansible/roles/monitor/files/check_service.sh  7
-rw-r--r--  compass/deploy/ansible/roles/monitor/files/root  1
-rw-r--r--  compass/deploy/ansible/roles/monitor/tasks/main.yml  11
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/main.yml  5
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml  45
-rw-r--r--  compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml  27
-rw-r--r--  compass/deploy/ansible/roles/mq/templates/.erlang.cookie  1
-rw-r--r--  compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf  1
-rw-r--r--  compass/deploy/ansible/roles/neutron-common/handlers/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/defaults/main.yml  2
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/handlers/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/tasks/main.yml  55
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini  90
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf  2
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot  25
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini  81
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini  46
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini  108
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf  465
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf  466
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh  4
-rw-r--r--  compass/deploy/ansible/roles/neutron-compute/templates/nova.conf  73
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/handlers/main.yml  24
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml  10
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml  29
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini  90
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf  2
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot  25
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini  81
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini  46
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini  108
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf  465
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf  466
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh  4
-rw-r--r--  compass/deploy/ansible/roles/neutron-controller/templates/nova.conf  69
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/handlers/main.yml  21
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml  20
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/main.yml  114
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/tasks/odl.yml  13
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini  90
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf  2
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot  25
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini  81
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini  46
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini  108
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf  465
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron.conf  466
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh  4
-rw-r--r--  compass/deploy/ansible/roles/neutron-network/templates/nova.conf  69
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/handlers/main.yml  3
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/tasks/main.yml  21
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf  7
-rw-r--r--  compass/deploy/ansible/roles/nova-compute/templates/nova.conf  73
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/handlers/main.yml  24
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/main.yml  13
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml  16
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml  35
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini  90
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf  2
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot  25
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini  81
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini  46
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini  108
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf  465
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron.conf  466
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh  4
-rw-r--r--  compass/deploy/ansible/roles/nova-controller/templates/nova.conf  72
-rw-r--r--  compass/deploy/ansible/roles/repo/tasks/main.yml  6
-rw-r--r--  compass/deploy/ansible/roles/repo/templates/sources.list  1
-rw-r--r--  compass/deploy/compass_vm.sh  103
-rw-r--r--  compass/deploy/conf/baremetal.conf  20
-rw-r--r--  compass/deploy/conf/base.conf  24
-rw-r--r--  compass/deploy/conf/cluster.conf  20
-rw-r--r--  compass/deploy/conf/five.conf  4
-rw-r--r--  compass/deploy/deploy-vm.sh  9
-rw-r--r--  compass/deploy/deploy_host.sh  40
-rwxr-xr-x [-rw-r--r--]  compass/deploy/func.sh  17
-rw-r--r--  compass/deploy/host_baremetal.sh  9
-rw-r--r--  compass/deploy/host_vm.sh  59
-rwxr-xr-x  compass/deploy/network.sh  70
-rw-r--r--  compass/deploy/prepare.sh  66
-rw-r--r--  compass/deploy/remote_excute.exp  23
-rw-r--r--  compass/deploy/status_callback.py  174
-rw-r--r--  compass/deploy/template/network/bridge.xml  5
-rw-r--r--  compass/deploy/template/network/nat.xml  10
-rw-r--r--  compass/deploy/template/vm/compass.xml  64
-rw-r--r--  compass/deploy/template/vm/host.xml  67
-rw-r--r--  foreman/ci/Vagrantfile  9
-rwxr-xr-x  foreman/ci/bootstrap.sh  5
-rwxr-xr-x  foreman/ci/clean.sh  217
-rwxr-xr-x  foreman/ci/deploy.sh  1437
-rw-r--r--  foreman/ci/opnfv_ksgen_settings.yml  5
-rw-r--r--  foreman/ci/opnfv_ksgen_settings_no_HA.yml  264
-rw-r--r--  foreman/ci/reload_playbook.yml  1
-rwxr-xr-x  foreman/ci/vm_nodes_provision.sh  50
-rw-r--r--  foreman/docs/src/installation-instructions.rst  16
-rw-r--r--  fuel/build/Makefile  10
-rw-r--r--  fuel/build/docker/ubuntu-builder/Dockerfile  3
-rwxr-xr-x  fuel/build/docker/ubuntu-builder/install.sh  25
-rwxr-xr-x  fuel/build/f_odl_docker/Makefile  51
-rwxr-xr-x  fuel/build/f_odl_docker/dockerfile/Dockerfile  72
-rwxr-xr-x  fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh  8
-rwxr-xr-x  fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh  17
-rwxr-xr-x  fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh  38
-rw-r--r--  fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp  77
-rw-r--r--  fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh  219
-rwxr-xr-x  fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh  192
-rwxr-xr-x  fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh  54
-rwxr-xr-x  fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh  95
-rw-r--r--  fuel/build/f_odl_docker/scripts/config_net_odl.sh  164
-rw-r--r--  fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh  146
-rwxr-xr-x  fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh  90
-rw-r--r--  fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh  23
-rw-r--r--  fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp  2
-rw-r--r--  fuel/build/opendaylight/Makefile  102
-rw-r--r--  fuel/build/opendaylight/README  52
-rw-r--r--  fuel/build/opendaylight/f_odl/Makefile  49
-rw-r--r--  fuel/build/opendaylight/f_odl/README  49
-rw-r--r--  fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp  13
-rw-r--r--  fuel/build/opendaylight/f_odl/testing/README  12
-rw-r--r--  fuel/build/opendaylight/f_odl/testing/fake_init.pp  13
-rwxr-xr-x  fuel/build/opendaylight/make-odl-deb.sh  314
-rw-r--r--  fuel/build/opendaylight/odl_maven/settings.xml  46
-rw-r--r--  fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master  1
-rw-r--r--  opensteak/ci/build.sh  19
-rw-r--r--  opensteak/ci/deploy.sh  28
-rw-r--r--  opensteak/config/common.yaml  119
-rw-r--r--  opensteak/config/infra.yaml  81
-rw-r--r--  opensteak/tools/README.rst  52
-rw-r--r--  opensteak/tools/config.yaml  78
-rw-r--r--  opensteak/tools/create_foreman.py  236
-rw-r--r--  opensteak/tools/files_foreman/id_rsa  27
-rw-r--r--  opensteak/tools/files_foreman/id_rsa.pub  1
-rw-r--r--  opensteak/tools/opensteak/.gitignore  58
-rw-r--r--  opensteak/tools/opensteak/__init__.py  18
-rw-r--r--  opensteak/tools/opensteak/argparser.py  46
-rw-r--r--  opensteak/tools/opensteak/conf.py  72
-rw-r--r--  opensteak/tools/opensteak/foreman.py  60
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/.gitignore  58
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/__init__.py  18
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/api.py  197
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/architectures.py  49
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/compute_resources.py  62
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/domains.py  44
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/freeip.py  79
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/hostgroups.py  103
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/hosts.py  142
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/item.py  135
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/itemHost.py  141
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py  50
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py  61
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py  62
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/objects.py  136
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/operatingsystems.py  66
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/puppetClasses.py  46
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/smart_proxies.py  36
-rw-r--r--  opensteak/tools/opensteak/foreman_objects/subnets.py  67
-rw-r--r--  opensteak/tools/opensteak/printer.py  141
-rw-r--r--  opensteak/tools/opensteak/templateparser.py  34
-rw-r--r--  opensteak/tools/opensteak/virsh.py  174
-rw-r--r--  opensteak/tools/templates_foreman/install.sh  216
-rw-r--r--  opensteak/tools/templates_foreman/kvm-config  65
-rw-r--r--  opensteak/tools/templates_foreman/meta-data  12
-rw-r--r--  opensteak/tools/templates_foreman/user-data  25
238 files changed, 16150 insertions, 2830 deletions
diff --git a/common/docs/user-guide.rst b/common/docs/user-guide.rst
index 8e02224..08b2767 100644
--- a/common/docs/user-guide.rst
+++ b/common/docs/user-guide.rst
@@ -21,14 +21,13 @@ Version history
| **Date** | **Ver.** | **Author** | **Comment** |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
-| 2015-05-28 | 0.0.1 | Christopher Price | Initial version |
+| 2015-06-04 | 1.0.0 | Christopher Price | Initial revision |
| | | (Ericsson AB) | |
+--------------------+--------------------+--------------------+--------------------+
-| 2015-06-02 | 0.0.2 | Christopher Price | Minor Updates |
-| | | (Ericsson AB) | |
+| 2015-06-05 | 1.0.1 | Christopher Price | Corrected links & |
+| | | (Ericsson AB) | e-mail address |
+--------------------+--------------------+--------------------+--------------------+
-
.. contents:: Table of Contents
:backlinks: none
@@ -60,16 +59,16 @@ Hardware Requirements
The Arno release of OPNFV is intended to be run as a baremetal deployment on a "Pharos compliant" lab infrastructure. The Pharos project in OPNFV is a community activity to provide guidance and establish requirements on hardware platforms supporting the Arno virtualisation platform.
-Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: http://artifacts.opnfv.org/pharos/docs/spec.html
+Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: https://www.opnfv.org/sites/opnfv/files/release/pharos-spec.arno.2015.1.0.pdf
Arno Platform Deployment
------------------------
The Arno platform supports installation and deployment using two deployment tools; a Foreman based deployment toolchain and a Fuel based deployment toolchain.
-In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow in the Foreman installation guide: http://artifacts.opnfv.org/genesis/foreman/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow in the Foreman installation guide: https://www.opnfv.org/sites/opnfv/files/release/foreman_install-guide.arno.2015.1.0.pdf
-In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow in the Fuel installation guide: http://artifacts.opnfv.org/genesis/fuel/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow in the Fuel installation guide: https://www.opnfv.org/sites/opnfv/files/release/install-guide.arno.2015.1.0.pdf
Enabling or disabling OpenDaylight and the native Neutron driver
----------------------------------------------------------------
@@ -79,7 +78,7 @@ You may find that you wish to adjust the system by enabling or disabling the nat
Deployment Validation
---------------------
-Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: http://artifacts.opnfv.org/functest/docs/functest.html
+Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: https://www.opnfv.org/sites/opnfv/files/release/functest.arno.2015.1.0.pdf
Operating the Arno platform
===========================
@@ -117,7 +116,7 @@ You can engage with the community to help us improve and further develop the OPN
- To access Jira for issue reporting or improvement proposals head to: https://jira.opnfv.org/
- To get started helping out developing the platform head to: https://wiki.opnfv.org/developer
-Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://support@opnfv.org
+Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://opnfv-users@lists.opnfv.org
License
=======
@@ -149,7 +148,7 @@ Fuel
`Fuel User Guide <http://docs.fuel-infra.org/openstack/fuel/fuel-6.0/user-guide.html>`_
:Authors: Christopher Price (christopher.price@ericsson.com)
-:Version: 0.0.2
+:Version: 1.0.1
**Documentation tracking**
diff --git a/common/puppet-opnfv/manifests/compute.pp b/common/puppet-opnfv/manifests/compute.pp
index 0b81757..2fed241 100644
--- a/common/puppet-opnfv/manifests/compute.pp
+++ b/common/puppet-opnfv/manifests/compute.pp
@@ -51,11 +51,11 @@ class opnfv::compute {
if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password }
##HA Global params
- if $ha_flag {
+ if $ha_flag and str2bool($ha_flag) {
if $private_network == '' { fail('private_network is empty') }
if !$keystone_private_vip { fail('keystone_private_vip is empty') }
if !$glance_private_vip { fail('glance_private_vip is empty') }
- if !$nova_private_vip { fail('nova_private_vip is empty') }
+ if !$nova_public_vip { fail('nova_public_vip is empty') }
if !$nova_db_password { $nova_db_password = $single_password }
if !$nova_user_password { $nova_user_password = $single_password }
if !$controllers_ip_array { fail('controllers_ip_array is empty') }
@@ -78,19 +78,30 @@ class opnfv::compute {
} else {
##non HA params
- if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
- if !$private_ip { fail('private_ip is empty') }
- $keystone_private_vip = $private_ip
- $glance_private_vip = $private_ip
- $nova_private_vip = $private_ip
- $neutron_private_vip = $private_ip
- if !$nova_db_password { fail('nova_db_password is empty') }
- if !$nova_user_password { fail('nova_user_password is empty') }
- if !$odl_control_ip { $odl_control_ip = $private_ip }
- if !$mysql_ip { $mysql_ip = $private_ip }
- if !$amqp_ip { $amqp_ip = $private_ip }
- if !$amqp_username { $amqp_username = 'guest' }
- if !$amqp_password { $amqp_password = 'guest' }
+ ##Mandatory
+ if $private_network == '' { fail('private_network is empty') }
+ if ($odl_flag != '') and str2bool($odl_flag) {
+ if $odl_control_ip == '' { fail('odl_control_ip is empty') }
+ }
+ if $controller_ip == '' { fail('controller_ip is empty') }
+
+ ##Optional
+ ##Find private interface
+ $ovs_tunnel_if = get_nic_from_network("$private_network")
+ ##Find private ip
+ $private_ip = get_ip_from_nic("$ovs_tunnel_if")
+
+ $keystone_private_vip = $controller_ip
+ $glance_private_vip = $controller_ip
+ $nova_public_vip = $controller_ip
+ $neutron_private_vip = $controller_ip
+
+ if !$nova_db_password { $nova_db_password = $single_password }
+ if !$nova_user_password { $nova_user_password = $single_password }
+ if !$mysql_ip { $mysql_ip = $controller_ip }
+ if !$amqp_ip { $amqp_ip = $controller_ip }
+ if !$amqp_username { $amqp_username = $single_username }
+ if !$amqp_password { $amqp_password = $single_password }
if !$ceph_mon_host { $ceph_mon_host= ["$private_ip"] }
if !$ceph_mon_initial_members { $ceph_mon_initial_members = ["$::hostname"] }
}
@@ -103,7 +114,7 @@ class opnfv::compute {
libvirt_inject_password => 'false',
libvirt_inject_key => 'false',
libvirt_images_type => 'rbd',
- nova_host => $nova_private_vip,
+ nova_host => $nova_public_vip,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
private_network => '',
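The non-HA branch now derives the tunnel interface and private IP on the node itself via the custom functions get_nic_from_network and get_ip_from_nic, instead of requiring $ovs_tunnel_if and $private_ip as parameters. A rough shell equivalent of that lookup (a sketch only; the functions' Ruby implementation is not part of this diff, and 10.4.9.0/24 is an example CIDR):

    # find the interface whose connected route falls inside the private CIDR
    nic=$(ip -o -4 route show root 10.4.9.0/24 | awk '{print $3; exit}')
    # read that interface's IPv4 address, stripping the prefix length
    ip4=$(ip -o -4 addr show dev "$nic" | awk '{split($4, a, "/"); print a[1]; exit}')
    echo "ovs_tunnel_if=$nic private_ip=$ip4"
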
diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp
index 157bc8f..60cae34 100644
--- a/common/puppet-opnfv/manifests/controller_networker.pp
+++ b/common/puppet-opnfv/manifests/controller_networker.pp
@@ -302,6 +302,7 @@ class opnfv::controller_networker {
class { "quickstack::pacemaker::neutron":
agent_type => $this_agent,
enable_tunneling => 'true',
+ external_network_bridge => 'br-ex',
ml2_mechanism_drivers => $ml2_mech_drivers,
ml2_network_vlan_ranges => ["physnet1:10:50"],
odl_controller_ip => $odl_control_ip,
@@ -309,6 +310,18 @@ class opnfv::controller_networker {
ovs_tunnel_iface => $ovs_tunnel_if,
ovs_tunnel_types => ["vxlan"],
verbose => 'true',
+ neutron_conf_additional_params => { default_quota => 'default',
+ quota_network => '50',
+ quota_subnet => '50',
+ quota_port => 'default',
+ quota_security_group => '50',
+ quota_security_group_rule => 'default',
+ quota_vip => 'default',
+ quota_pool => 'default',
+ quota_router => '50',
+ quota_floatingip => '100',
+ network_auto_schedule => 'default',
+ },
}
if ($external_network_flag != '') and str2bool($external_network_flag) {
@@ -316,50 +329,47 @@ class opnfv::controller_networker {
}
} else {
- if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') }
- if $public_ip == '' { fail('public_ip is empty') }
- if $private_ip == '' { fail('private_ip is empty') }
-
- if $odl_control_ip == '' { $odl_control_ip = $private_ip }
-
- if $mysql_ip == '' { fail('mysql_ip is empty') }
- if $mysql_root_password == '' { fail('mysql_root_password is empty') }
- if $amqp_ip == '' { fail('amqp_ip is empty') }
-
- if $memcache_ip == '' { fail('memcache_ip is empty') }
- if $neutron_ip == '' { fail('neutron_ip is empty') }
-
- if $keystone_db_password == '' { fail('keystone_db_password is empty') }
-
- if $horizon_secret_key == '' { fail('horizon_secret_key is empty') }
-
- if $nova_user_password == '' { fail('nova_user_password is empty') }
- if $nova_db_password == '' { fail('nova_db_password is empty') }
-
- if $cinder_user_password == '' { fail('cinder_user_password is empty') }
- if $cinder_db_password == '' { fail('cinder_db_password is empty') }
-
- if $glance_user_password == '' { fail('glance_user_password is empty') }
- if $glance_db_password == '' { fail('glance_db_password is empty') }
-
- if $neutron_user_password == '' { fail('neutron_user_password is empty') }
- if $neutron_db_password == '' { fail('neutron_db_password is empty') }
- if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') }
-
- if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') }
- if $ceilometer_metering_secret == '' { fail('ceilometer_user_password is empty') }
-
- if $heat_user_password == '' { fail('heat_user_password is empty') }
- if $heat_db_password == '' { fail('heat_db_password is empty') }
- if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') }
-
- if $swift_user_password == '' { fail('swift_user_password is empty') }
- if $swift_shared_secret == '' { fail('swift_shared_secret is empty') }
- if $swift_admin_password == '' { fail('swift_admin_password is empty') }
+ ##Mandatory Non-HA parameters
+ if $private_network == '' { fail('private_network is empty') }
+ if $public_network == '' { fail('public_network is empty') }
+ ##Optional Non-HA parameters
if !$amqp_username { $amqp_username = $single_username }
if !$amqp_password { $amqp_password = $single_password }
+ if !$mysql_root_password { $mysql_root_password = $single_password }
+ if !$keystone_db_password { $keystone_db_password = $single_password }
+ if !$horizon_secret_key { $horizon_secret_key = $single_password }
+ if !$nova_db_password { $nova_db_password = $single_password }
+ if !$nova_user_password { $nova_user_password = $single_password }
+ if !$cinder_db_password { $cinder_db_password = $single_password }
+ if !$cinder_user_password { $cinder_user_password = $single_password }
+ if !$glance_db_password { $glance_db_password = $single_password }
+ if !$glance_user_password { $glance_user_password = $single_password }
+ if !$neutron_db_password { $neutron_db_password = $single_password }
+ if !$neutron_user_password { $neutron_user_password = $single_password }
+ if !$neutron_metadata_shared_secret { $neutron_metadata_shared_secret = $single_password }
+ if !$ceilometer_user_password { $ceilometer_user_password = $single_password }
+ if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password }
+ if !$heat_user_password { $heat_user_password = $single_password }
+ if !$heat_db_password { $heat_db_password = $single_password }
+ if !$heat_auth_encryption_key { $heat_auth_encryption_key = 'octopus1octopus1' }
+ if !$swift_user_password { $swift_user_password = $single_password }
+ if !$swift_shared_secret { $swift_shared_secret = $single_password }
+ if !$swift_admin_password { $swift_admin_password = $single_password }
+ ##Find private interface
+ $ovs_tunnel_if = get_nic_from_network("$private_network")
+ ##Find private ip
+ $private_ip = get_ip_from_nic("$ovs_tunnel_if")
+ #Find public NIC
+ $public_nic = get_nic_from_network("$public_network")
+ $public_ip = get_ip_from_nic("$public_nic")
+
+ if !$mysql_ip { $mysql_ip = $private_ip }
+ if !$amqp_ip { $amqp_ip = $private_ip }
+ if !$memcache_ip { $memcache_ip = $private_ip }
+ if !$neutron_ip { $neutron_ip = $private_ip }
+ if !$odl_control_ip { $odl_control_ip = $private_ip }
class { "quickstack::neutron::controller_networker":
admin_email => $admin_email,
@@ -414,6 +424,8 @@ class opnfv::controller_networker {
horizon_cert => $quickstack::params::horizon_cert,
horizon_key => $quickstack::params::horizon_key,
+ keystonerc => true,
+
ml2_mechanism_drivers => $ml2_mech_drivers,
#neutron => true,
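The neutron_conf_additional_params hash added above raises the network, subnet, security-group, router and floating-IP quotas while leaving the remaining keys at 'default'. Once the controller converges, the effective values can be read back with the Juno-era neutron client; a sketch, assuming keystonerc => true writes /root/keystonerc_admin as in quickstack:

    source /root/keystonerc_admin   # admin credentials written by the manifest (assumed path)
    neutron quota-show              # should reflect quota_network=50, quota_floatingip=100, etc.
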
diff --git a/common/puppet-opnfv/manifests/external_net_presetup.pp b/common/puppet-opnfv/manifests/external_net_presetup.pp
index b7c7c5f..96038c0 100644
--- a/common/puppet-opnfv/manifests/external_net_presetup.pp
+++ b/common/puppet-opnfv/manifests/external_net_presetup.pp
@@ -85,7 +85,7 @@ class opnfv::external_net_presetup {
owner => 'root',
group => 'root',
mode => '0644',
- content => template('trystack/br_ex.erb'),
+ content => template('opnfv/br_ex.erb'),
before => Class["quickstack::pacemaker::params"],
}
->
diff --git a/common/puppet-opnfv/manifests/external_net_setup.pp b/common/puppet-opnfv/manifests/external_net_setup.pp
index af00f20..fc014d4 100644
--- a/common/puppet-opnfv/manifests/external_net_setup.pp
+++ b/common/puppet-opnfv/manifests/external_net_setup.pp
@@ -60,7 +60,7 @@ class opnfv::external_net_setup {
provider_network_type => flat,
provider_physical_network => 'physnet1',
router_external => true,
- tenant_name => 'admin',
+ tenant_name => 'services',
}
->
neutron_subnet { 'provider_subnet':
@@ -70,8 +70,9 @@ class opnfv::external_net_setup {
gateway_ip => $public_gateway,
allocation_pools => [ "start=${public_allocation_start},end=${public_allocation_end}" ],
dns_nameservers => $public_dns,
+ enable_dhcp => false,
network_name => 'provider_network',
- tenant_name => 'admin',
+ tenant_name => 'services',
}
->
neutron_router { 'provider_router':
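For reference, the puppet resources above correspond to roughly the following Juno-era CLI calls, now issued under the services tenant and with DHCP disabled on the external subnet. A sketch only; the addressing is illustrative, where the manifest substitutes $public_gateway and the allocation-pool parameters:

    tenant=$(keystone tenant-list | awk '/ services / {print $2}')
    neutron net-create provider_network --tenant-id "$tenant" \
        --provider:network_type flat --provider:physical_network physnet1 --router:external=True
    neutron subnet-create provider_network 172.30.9.0/24 --name provider_subnet \
        --tenant-id "$tenant" --gateway 172.30.9.1 \
        --allocation-pool start=172.30.9.200,end=172.30.9.220 --disable-dhcp
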
diff --git a/common/puppet-opnfv/manifests/init.pp b/common/puppet-opnfv/manifests/init.pp
index 7b68df5..d26bd7a 100644
--- a/common/puppet-opnfv/manifests/init.pp
+++ b/common/puppet-opnfv/manifests/init.pp
@@ -18,7 +18,6 @@ class opnfv {
include opnfv::resolver
include opnfv::ntp
include opnfv::add_packages
- include opnfv::odl_docker
include opnfv::opncheck
}
diff --git a/common/puppet-opnfv/manifests/odl_docker.pp b/common/puppet-opnfv/manifests/odl_docker.pp
deleted file mode 100644
index 6e70ba0..0000000
--- a/common/puppet-opnfv/manifests/odl_docker.pp
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-class opnfv::odl_docker
-{
- case $::fuel_settings['role'] {
- /controller/: {
-
- file { "/opt":
- ensure => "directory",
- }
-
- file { "/opt/opnfv":
- ensure => "directory",
- owner => "root",
- group => "root",
- mode => 777,
- }
-
- file { "/opt/opnfv/odl":
- ensure => "directory",
- }
-
- file { "/opt/opnfv/odl/odl_docker_image.tar":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/docker-latest":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/odl_docker/docker-latest",
- mode => 750,
- }
-
- file { "/opt/opnfv/odl/start_odl_conatiner.sh":
- ensure => present,
- source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh",
- mode => 750,
- }
- }
- }
-}
-
diff --git a/common/puppet-opnfv/manifests/templates/br_ex.erb b/common/puppet-opnfv/manifests/templates/br_ex.erb
new file mode 100644
index 0000000..6c0e7e7
--- /dev/null
+++ b/common/puppet-opnfv/manifests/templates/br_ex.erb
@@ -0,0 +1,10 @@
+DEVICE=br-ex
+DEVICETYPE=ovs
+IPADDR=<%= @public_nic_ip %>
+NETMASK=<%= @public_nic_netmask %>
+GATEWAY=<%= @public_gateway %>
+BOOTPROTO=static
+ONBOOT=yes
+TYPE=OVSBridge
+PROMISC=yes
+PEERDNS=no
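The new template renders a RHEL-style ifcfg file that makes br-ex an OVS bridge holding the node's public address; external_net_presetup.pp now writes it via template('opnfv/br_ex.erb'). A quick way to confirm the bridge came up as intended (a sketch; interface names vary per deployment):

    ifup br-ex                   # re-read /etc/sysconfig/network-scripts/ifcfg-br-ex
    ip -4 addr show br-ex        # should now carry the public IP rendered into the template
    ovs-vsctl list-ports br-ex   # the enslaved public NIC should be listed
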
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile b/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
deleted file mode 100644
index 80a92d8..0000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/Dockerfile
+++ /dev/null
@@ -1,82 +0,0 @@
-####################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# DOCKERFILE TO CREATE ODL IN CONTAINER AND EXPOSE DLUX AND OVSDB TO ODL
-#
-#############################################################################
-
-
-#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff
-FROM ubuntu:12.04
-
-# Maintainer Info
-MAINTAINER Daniel Smith
-
-
-#Run apt-get update one start just to check for updates when building
-RUN echo "Updating APT"
-RUN apt-get update
-RUN echo "Adding wget"
-RUN apt-get install -y wget
-RUN apt-get install -y net-tools
-RUN apt-get install -y openjdk-7-jre
-RUN apt-get install -y openjdk-7-jdk
-RUN apt-get install -y openssh-server
-RUN apt-get install -y vim
-RUN apt-get install -y expect
-RUN apt-get install -y daemontools
-RUN mkdir -p /opt/odl_source
-RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
-
-
-
-#Now lets got and fetch the ODL distribution
-RUN echo "Fetching ODL"
-RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
-
-RUN echo "Untarring ODL inplace"
-RUN mkdir -p /opt/odl
-RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
-
-RUN echo "Installing DLUX and other features into ODL"
-#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
-COPY container_scripts/start_odl_docker_container.sh /etc/init.d/
-COPY container_scripts/speak.sh /etc/init.d/
-#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
-RUN chmod 777 /etc/init.d/start_odl_docker_container.sh
-RUN chmod 777 /etc/init.d/speak.sh
-
-
-
-# Expose the ports
-
-# PORTS FOR BASE SYSTEM AND DLUX
-EXPOSE 8101
-EXPOSE 6633
-EXPOSE 1099
-EXPOSE 43506
-EXPOSE 8181
-EXPOSE 8185
-EXPOSE 9000
-EXPOSE 39378
-EXPOSE 33714
-EXPOSE 44444
-EXPOSE 6653
-
-# PORTS FOR OVSDB AND ODL CONTROL
-EXPOSE 12001
-EXPOSE 6640
-EXPOSE 8080
-EXPOSE 7800
-EXPOSE 55130
-EXPOSE 52150
-EXPOSE 36826
-
-# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable
-CMD ["/etc/init.d/start_odl_docker_container.sh"]
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh
deleted file mode 100644
index 533942e..0000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/check_feature.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-#!/usr/bin/expect
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:list | grep -i odl-restconf\r"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
-
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh
deleted file mode 100644
index 95bbaf4..0000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/speak.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/expect
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY
-#################################################################################
-
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
diff --git a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh
deleted file mode 100644
index 8ae05f7..0000000
--- a/common/puppet-opnfv/manifests/templates/dockerfile/container_scripts/start_odl_docker_container.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# daniel.smith@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY
-#################################################################################
-# Start up script for calling karaf / ODL inside a docker container.
-#
-# This script will also call a couple expect scripts to load the feature set that we want
-
-
-#ENV
-export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
-
-#MAIN
-echo "Starting up the da Sheilds..."
-/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
-echo "Sleeping 5 bad hack"
-sleep 10
-echo "should see stuff listening now"
-netstat -na
-echo " should see proess running for karaf"
-ps -efa
-echo " Starting the packages we want"
-/etc/init.d/speak.sh
-echo "Printout the status - if its right, you should see 8181 appear now"
-netstat -na
-ps -efa
-
-
-
-## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute
-## Cheap - but effective
-while true;
-do
- echo "Checking status of ODL:"
- /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
- sleep 60
-done
-
-
diff --git a/compass/ci/build.sh b/compass/ci/build.sh
index 4e5b87b..2b7fd9a 100755
--- a/compass/ci/build.sh
+++ b/compass/ci/build.sh
@@ -84,7 +84,7 @@ EOF
# BEGIN of variables to customize
#
BUILD_BASE=$(readlink -e ../build/)
-RESULT_DIR="${BUILD_BASE}/release"
+export RESULT_DIR="${BUILD_BASE}/release"
BUILD_SPEC="${BUILD_BASE}/config.mk"
CACHE_DIR="cache"
LOCAL_CACHE_ARCH_NAME="compass-cache"
@@ -112,11 +112,11 @@ DEBUG=0
INTEGRATION_TEST=0
FULL_INTEGRATION_TEST=0
INTERACTIVE=0
-BUILD_CACHE_URI=
+export BUILD_CACHE_URI=
BUILD_SPEC=
BUILD_DIR=
BUILD_LOG=
-BUILD_VERSION=
+export BUILD_VERSION=
MAKE_ARGS=
#
# END of script assigned variables
@@ -133,6 +133,8 @@ source ${INCLUDE_DIR}/build.sh.debug
############################################################################
# BEGIN of main
#
+build_prepare
+
while getopts "s:c:v:f:l:r:RtTh" OPTION
do
case $OPTION in
@@ -376,15 +378,15 @@ mkdir -p ${BUILD_DIR}
cp ${BUILD_BASE}/.versions ${BUILD_DIR}
cp ${RESULT_DIR}/*.iso* ${BUILD_DIR}
-if [ $POPULATE_CACHE -eq 1 ]; then
- if [ ! -z ${BUILD_CACHE_URI} ]; then
- echo "Building cache ..."
- tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
- echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
- ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
- rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
- fi
-fi
+#if [ $POPULATE_CACHE -eq 1 ]; then
+# if [ ! -z ${BUILD_CACHE_URI} ]; then
+# echo "Building cache ..."
+# tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR}
+# echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}"
+# ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz
+# rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz
+# fi
+#fi
echo "Success!!!"
exit 0
#
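RESULT_DIR, BUILD_CACHE_URI and BUILD_VERSION are now exported, presumably so that build_prepare (called before option parsing) and helpers invoked in subshells can see them; plain shell variables are invisible there. A two-line illustration of the difference:

    PLAIN=value; export EXPORTED=value
    bash -c 'echo "PLAIN=${PLAIN:-<unset>} EXPORTED=${EXPORTED:-<unset>}"'
    # prints: PLAIN=<unset> EXPORTED=value
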
diff --git a/compass/ci/deploy.sh b/compass/ci/deploy.sh
index fe754aa..197bf63 100755
--- a/compass/ci/deploy.sh
+++ b/compass/ci/deploy.sh
@@ -1,5 +1,9 @@
-SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-CONF_NAME=$1
-source ${SCRIPT_DIR}/../deploy/prepare.sh || exit $?
-source ${SCRIPT_DIR}/../deploy/setup-env.sh || exit $?
-source ${SCRIPT_DIR}/../deploy/deploy-vm.sh || exit $?
+#set -x
+COMPASS_DIR=`cd ${BASH_SOURCE[0]%/*}/../;pwd`
+export COMPASS_DIR
+
+apt-get install screen
+screen -ls |grep deploy|awk -F. '{print $1}'|xargs kill -9
+screen -wipe
+#screen -dmSL deploy bash $COMPASS_DIR/ci/launch.sh $*
+$COMPASS_DIR/ci/launch.sh $*
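The rewritten wrapper kills any stale screen session named "deploy" before launching. Note that the pipeline runs kill -9 even when grep matches nothing, and apt-get without -y can block a CI run on a prompt; a slightly more defensive variant (a sketch, not what the script does):

    apt-get install -y screen                                        # -y avoids an interactive prompt
    screen -ls | awk -F. '/deploy/ {print $1}' | xargs -r kill -9    # -r: skip kill when nothing matches
    screen -wipe || true                                             # -wipe is non-zero when there is nothing to clean
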
diff --git a/compass/ci/launch.sh b/compass/ci/launch.sh
new file mode 100755
index 0000000..316b06f
--- /dev/null
+++ b/compass/ci/launch.sh
@@ -0,0 +1,65 @@
+#set -x
+WORK_DIR=$COMPASS_DIR/ci/work
+
+if [[ $# -ge 1 ]];then
+ CONF_NAME=$1
+else
+ CONF_NAME=cluster
+fi
+
+source ${COMPASS_DIR}/ci/log.sh
+source ${COMPASS_DIR}/deploy/conf/${CONF_NAME}.conf
+source ${COMPASS_DIR}/deploy/prepare.sh
+source ${COMPASS_DIR}/deploy/network.sh
+
+if [[ ! -z $VIRT_NUMBER ]];then
+ source ${COMPASS_DIR}/deploy/host_vm.sh
+else
+ source ${COMPASS_DIR}/deploy/host_baremetal.sh
+fi
+
+source ${COMPASS_DIR}/deploy/compass_vm.sh
+source ${COMPASS_DIR}/deploy/deploy_host.sh
+
+######################### main process
+
+if ! prepare_env;then
+ echo "prepare_env failed"
+ exit 1
+fi
+
+log_info "########## get host mac begin #############"
+machines=`get_host_macs`
+if [[ -z $machines ]];then
+ log_error "get_host_macs failed"
+ exit 1
+fi
+
+log_info "deploy host macs: $machines"
+export machines
+
+log_info "########## set up network begin #############"
+if ! create_nets;then
+ log_error "create_nets failed"
+ exit 1
+fi
+
+if ! launch_compass;then
+ log_error "launch_compass failed"
+ exit 1
+fi
+if [[ ! -z $VIRT_NUMBER ]];then
+ if ! launch_host_vms;then
+ log_error "launch_host_vms failed"
+ exit 1
+ fi
+fi
+if ! deploy_host;then
+ #tear_down_machines
+ #tear_down_compass
+ exit 1
+else
+ #tear_down_machines
+ #tear_down_compass
+ exit 0
+fi
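launch.sh strings the sourced stages together: read the named config, prepare the environment, collect host MACs, build the libvirt networks, boot the Compass VM (plus host VMs when VIRT_NUMBER is set), then hand off to deploy_host. Typical invocation through the wrapper (a sketch):

    cd compass/ci
    ./deploy.sh cluster   # sources deploy/conf/cluster.conf; omitting the argument gives the same default
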
diff --git a/compass/ci/log.sh b/compass/ci/log.sh
new file mode 100755
index 0000000..f54fdca
--- /dev/null
+++ b/compass/ci/log.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+reset=`tput sgr0`
+red=`tput setaf 1`
+green=`tput setaf 2`
+yellow=`tput setaf 3`
+
+function log_info() {
+ echo -e "${green}$*${reset}"
+}
+
+function log_warn() {
+ echo -e "${yellow}$*${reset}"
+}
+
+function log_error() {
+ echo -e "${red}$*${reset}"
+}
+
+function log_progress() {
+ echo -en "${yellow}$*\r${reset}"
+}
+
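The helpers wrap tput colour codes so the stages above emit uniformly coloured status lines; log_progress ends with a carriage return so repeated calls overwrite a single line. Usage is simply:

    source compass/ci/log.sh
    log_info "compass VM is up"
    log_warn "retrying: host VM not ready yet"
    log_error "deploy_host failed"
    for i in 1 2 3; do log_progress "waiting... ${i}0s"; sleep 1; done; echo
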
diff --git a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml
new file mode 100644
index 0000000..9c1d7e7
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml
@@ -0,0 +1,42 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+ - common
+
+- hosts: ha
+ remote_user: root
+ sudo: True
+ roles:
+ - ha
+
+- hosts: controller
+ remote_user: root
+ sudo: True
+ roles:
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - cinder-controller
+ - glance
+ - neutron-common
+ - neutron-network
+ - dashboard
+
+- hosts: compute
+ remote_user: root
+ sudo: True
+ roles:
+ - nova-compute
+ - neutron-compute
+ - cinder-volume
+
+- hosts: all
+ remote_user: root
+ sudo: True
+ roles:
+ - monitor
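The play order encodes the dependency chain: repo and common everywhere, the ha role (load balancer/VIP) on the ha group, the OpenStack control-plane roles on controllers, compute roles on computes, and a final monitor pass over all hosts. Compass generates the inventory during deployment, but the playbook can also be driven by hand; a sketch, with the inventory filename an assumption:

    ansible-playbook -i inventory HA-ansible-multinodes.yml
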
diff --git a/compass/deploy/ansible/openstack_juno/allinone.yml b/compass/deploy/ansible/openstack_juno/allinone.yml
new file mode 100644
index 0000000..15220ca
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/allinone.yml
@@ -0,0 +1,38 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: controller
+ sudo: True
+ roles:
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
+
+- hosts: network
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/compute.yml b/compass/deploy/ansible/openstack_juno/compute.yml
new file mode 100644
index 0000000..b2679c0
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/compute.yml
@@ -0,0 +1,9 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/controller.yml b/compass/deploy/ansible/openstack_juno/controller.yml
new file mode 100644
index 0000000..7f4a10e
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/controller.yml
@@ -0,0 +1,15 @@
+---
+- hosts: controller
+ remote_user: root
+ sudo: True
+ roles:
+ - repo
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
diff --git a/compass/deploy/ansible/openstack_juno/group_vars/all b/compass/deploy/ansible/openstack_juno/group_vars/all
new file mode 100644
index 0000000..5643fcd
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/group_vars/all
@@ -0,0 +1,54 @@
+controller_host: 10.1.0.11
+network_host: 10.1.0.12
+compute_host: 10.1.0.13
+storage_host: 10.1.0.14
+odl_controller: 10.1.0.15
+
+DEBUG: False
+VERBOSE: False
+NTP_SERVER_LOCAL: controller
+DB_HOST: "{{ controller_host }}"
+MQ_BROKER: rabbitmq
+
+OPENSTACK_REPO: cloudarchive-juno.list
+ADMIN_TOKEN: admin
+CEILOMETER_TOKEN: c095d479023a0fd58a54
+RABBIT_PASS: guest
+KEYSTONE_DBPASS: keystone_db_secret
+DEMO_PASS: demo_secret
+ADMIN_PASS: admin_secret
+GLANCE_DBPASS: glance_db_secret
+GLANCE_PASS: glance_secret
+NOVA_DBPASS: nova_db_secret
+NOVA_PASS: nova_secret
+DASH_DBPASS: dash_db_secret
+CINDER_DBPASS: cinder_db_secret
+CINDER_PASS: cinder_secret
+NEUTRON_DBPASS: neutron_db_secret
+NEUTRON_PASS: netron_secret
+NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
+NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
+#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
+NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
+NEUTRON_TUNNEL_TYPES: ['vxlan']
+METADATA_SECRET: metadata_secret
+INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
+INTERFACE_NAME: eth2
+
+EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
+EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
+FLOATING_IP_START: 203.0.113.101
+FLOATING_IP_END: 203.0.113.200
+
+juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
+build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
+build_in_image_name: cirros-0.3.3-x86_64-disk.img
+
+physical_device: /dev/sdb
+
+internal_interface: ansible_eth1
+internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
+
+odl_username: admin
+odl_password: admin
+odl_api_port: 8080
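internal_ip is derived at runtime from the fact named by internal_interface, so every template referencing {{ internal_ip }} binds to the eth1 address of the host being configured. One way to confirm the fact resolves before a run (a sketch; the inventory filename is an assumption):

    ansible controller -i inventory -m setup -a 'filter=ansible_eth1'
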
diff --git a/compass/deploy/ansible/openstack_juno/multinodes.yml b/compass/deploy/ansible/openstack_juno/multinodes.yml
new file mode 100644
index 0000000..ffd29d5
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/multinodes.yml
@@ -0,0 +1,75 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: database
+ sudo: True
+ roles:
+ - common
+ - database
+
+- hosts: messaging
+ sudo: True
+ roles:
+ - common
+ - mq
+
+- hosts: identity
+ sudo: True
+ roles:
+ - common
+ - keystone
+
+- hosts: compute-controller
+ sudo: True
+ roles:
+ - common
+ - nova-controller
+
+- hosts: network-server
+ sudo: True
+ roles:
+ - common
+ - neutron-controller
+
+- hosts: storage-controller
+ sudo: True
+ roles:
+ - common
+ - cinder-controller
+
+- hosts: image
+ sudo: True
+ roles:
+ - common
+ - glance
+
+- hosts: dashboard
+ sudo: True
+ roles:
+ - common
+ - dashboard
+
+- hosts: network-worker
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage-volume
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute-worker
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
+
+
diff --git a/compass/deploy/ansible/openstack_juno/network.yml b/compass/deploy/ansible/openstack_juno/network.yml
new file mode 100644
index 0000000..558f317
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/network.yml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - neutron-network
diff --git a/compass/deploy/ansible/openstack_juno/single-controller.yml b/compass/deploy/ansible/openstack_juno/single-controller.yml
new file mode 100644
index 0000000..15220ca
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/single-controller.yml
@@ -0,0 +1,38 @@
+---
+- hosts: all
+ remote_user: root
+ sudo: true
+ roles:
+ - repo
+
+- hosts: controller
+ sudo: True
+ roles:
+ - common
+ - database
+ - mq
+ - keystone
+ - nova-controller
+ - neutron-controller
+ - dashboard
+ - cinder-controller
+ - glance
+
+- hosts: network
+ sudo: True
+ roles:
+ - common
+ - neutron-network
+
+- hosts: storage
+ sudo: True
+ roles:
+ - common
+ - cinder-volume
+
+- hosts: compute
+ sudo: True
+ roles:
+ - common
+ - nova-compute
+ - neutron-compute
diff --git a/compass/deploy/ansible/openstack_juno/storage.yml b/compass/deploy/ansible/openstack_juno/storage.yml
new file mode 100644
index 0000000..3c0aa41
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/storage.yml
@@ -0,0 +1,8 @@
+---
+- hosts: all
+ remote_user: vagrant
+ sudo: True
+ roles:
+ - repo
+ - common
+ - cinder-volume
diff --git a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
new file mode 100644
index 0000000..ef671dd
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: restart cinder-scheduler
+ service: name=cinder-scheduler state=restarted enabled=yes
+- name: restart cinder-api
+ service: name=cinder-api state=restarted enabled=yes
+
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
new file mode 100644
index 0000000..7796cf7
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -0,0 +1,20 @@
+---
+- name: sync cinder db
+ shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart cinder-scheduler
+ - restart cinder-api
+
+- meta: flush_handlers
+
+- name: upload cinder keystone register script
+ template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
+
+- name: run cinder register script
+ shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done
+ args:
+ creates: cinder_init_complete
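The registration task retries /opt/cinder_init.sh up to six times and drops a marker file on success; the creates: argument then makes the whole task a no-op on replays. The same guard pattern in plain shell, as a sketch of the effect:

    if [ ! -e cinder_init_complete ]; then
        for i in $(seq 0 5); do
            /opt/cinder_init.sh && touch cinder_init_complete && break
            sleep 5
        done
    fi
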
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
new file mode 100644
index 0000000..03ad432
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml
@@ -0,0 +1,20 @@
+---
+- name: install cinder packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - cinder-api
+ - cinder-scheduler
+ - python-cinderclient
+
+- name: generate cinder service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - cinder-api
+ - cinder-scheduler
+
+- name: upload cinder conf
+ template: src=cinder.conf dest=/etc/cinder/cinder.conf
+ notify:
+ - restart cinder-scheduler
+ - restart cinder-api
+
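One caveat in the service-list task above: shell with >> appends on every play, so /opt/service accumulates duplicate entries across reruns. An idempotent variant (a sketch) would guard the append:

    grep -qx cinder-api /opt/service 2>/dev/null || echo cinder-api >> /opt/service
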
diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
new file mode 100644
index 0000000..1dbe91f
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: cinder_install.yml
+ tags:
+ - install
+ - cinder-install
+ - cinder
+
+- include: cinder_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - cinder-config
+ - cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
new file mode 100644
index 0000000..b568a17
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini
@@ -0,0 +1,71 @@
+#############
+# OpenStack #
+#############
+
+[composite:osapi_volume]
+use = call:cinder.api:root_app_factory
+/: apiversions
+/v1: openstack_volume_api_v1
+/v2: openstack_volume_api_v2
+
+[composite:openstack_volume_api_v1]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+
+[composite:openstack_volume_api_v2]
+use = call:cinder.api.middleware.auth:pipeline_factory
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+
+[filter:request_id]
+paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
+
+[filter:faultwrap]
+paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
+[filter:noauth]
+paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
+
+[filter:sizelimit]
+paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
+
+[app:apiv1]
+paste.app_factory = cinder.api.v1.router:APIRouter.factory
+
+[app:apiv2]
+paste.app_factory = cinder.api.v2.router:APIRouter.factory
+
+[pipeline:apiversions]
+pipeline = faultwrap osvolumeversionapp
+
+[app:osvolumeversionapp]
+paste.app_factory = cinder.api.versions:Versions.factory
+
+[filter:authtoken]
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
+# auth_host = 127.0.0.1
+# auth_port = 35357
+# auth_protocol = http
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+##########
+# Shared #
+##########
+
+[filter:keystonecontext]
+paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
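+# NOTE: [filter:authtoken] appears twice in this file; when the sections are
+# merged at load time, this keystonemiddleware factory overrides the deprecated
+# keystoneclient one above, while the auth settings defined there still apply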
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
new file mode 100644
index 0000000..e34fd2f
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf
@@ -0,0 +1,63 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = {{ VERBOSE }}
+debug = {{ DEBUG }}
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
new file mode 100644
index 0000000..0ec61b6
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh
@@ -0,0 +1,6 @@
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s
+
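NOTE: the endpoint-create command above nests a service-list lookup in a subshell. A hedged
sketch of the same registration split into two Ansible tasks (same keystone CLI and
variables; the task layout and register name are illustrative, not part of this patch):

- name: look up the cinder volume service id
  shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}'
  register: volume_service

- name: create the volume endpoint for that service id
  # single-quoting the URLs avoids the %\(tenant_id\)s backslash escapes used in the script
  shell: >
    keystone --os-token={{ ADMIN_TOKEN }}
    --os-endpoint=http://{{ HA_VIP }}:35357/v2.0
    endpoint-create --service-id={{ volume_service.stdout }}
    --publicurl='http://{{ HA_VIP }}:8776/v1/%(tenant_id)s'
    --internalurl='http://{{ HA_VIP }}:8776/v1/%(tenant_id)s'
    --adminurl='http://{{ HA_VIP }}:8776/v1/%(tenant_id)s'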
diff --git a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
new file mode 100644
index 0000000..e872652
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml
@@ -0,0 +1 @@
+physical_device: /dev/loop0
diff --git a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
new file mode 100644
index 0000000..ad917ce
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart cinder-volume
+ service: name=cinder-volume state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
new file mode 100644
index 0000000..8c0e626
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+- name: install cinder-volume and lvm2 packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - cinder-volume
+ - lvm2
+
+- name: generate cinder volume service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - cinder-volume
+
+- name: check if physical device exists
+ stat: path={{ physical_device }}
+ register: st
+
+- name: replace physical_device with a loop device when it does not exist
+ local_action: copy src=loop.yml dest=/tmp/loop.yml
+ when: st.stat.exists == False
+
+- name: load loop.yml
+ include_vars: /tmp/loop.yml
+ when: st.stat.exists == False
+
+- name: check if cinder-volumes is mounted
+ shell: ls /mnt
+ register: cindervolumes
+
+- name: get available partition size
+ shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
+ register: partition_size
+
+- name: create sparse cinder-volumes backing file if absent
+ shell: dd if=/dev/zero of=/mnt/cinder-volumes
+ bs=1 count=0 seek={{ partition_size.stdout }}
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: get first lo device
+ shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
+ register: first_lo
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: do a losetup on /mnt/cinder-volumes
+ shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
+ when: cindervolumes.stdout != 'cinder-volumes'
+
+- name: create physical and group volumes
+ lvg: vg=cinder-volumes pvs={{ physical_device }}
+ vg_options=--force
+
+- name: upload cinder-volume configuration
+ template: src=cinder.conf dest=/etc/cinder/cinder.conf
+ backup=yes
+ notify:
+ - restart cinder-volume
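NOTE: taken together, the tasks above implement a loopback fallback: when no physical
device exists, a sparse file is created, bound to a loop device, and used as the LVM
physical volume. A condensed sketch of that flow (assumes a util-linux losetup that
supports --find/--show; names are illustrative, not part of this patch):

- name: create a sparse backing file sized to the free space on /
  shell: dd if=/dev/zero of=/mnt/cinder-volumes bs=1 count=0 seek={{ partition_size.stdout }}
  args:
    creates: /mnt/cinder-volumes

- name: bind the file to the first free loop device and capture its name
  shell: losetup --find --show /mnt/cinder-volumes
  register: loop_dev

- name: build the cinder-volumes volume group on that loop device
  lvg: vg=cinder-volumes pvs={{ loop_dev.stdout }}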
diff --git a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
new file mode 100644
index 0000000..aa3b8cc
--- /dev/null
+++ b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf
@@ -0,0 +1,62 @@
+[DEFAULT]
+rootwrap_config = /etc/cinder/rootwrap.conf
+api_paste_config = /etc/cinder/api-paste.ini
+iscsi_helper = tgtadm
+volume_name_template = volume-%s
+volume_group = cinder-volumes
+verbose = True
+auth_strategy = keystone
+state_path = /var/lib/cinder
+lock_path = /var/lock/cinder
+notification_driver=cinder.openstack.common.notifier.rpc_notifier
+volumes_dir = /var/lib/cinder/volumes
+
+log_file=/var/log/cinder/cinder.log
+
+control_exchange = cinder
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_port = 5672
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+my_ip = {{ storage_controller_host }}
+
+glance_host = {{ HA_VIP }}
+glance_port = 9292
+api_rate_limit = False
+storage_availability_zone = nova
+
+quota_volumes = 10
+quota_gigabytes=1000
+quota_driver=cinder.quota.DbQuotaDriver
+
+osapi_volume_listen = {{ storage_controller_host }}
+osapi_volume_listen_port = 8776
+
+db_backend = sqlalchemy
+volume_name_template = volume-%s
+snapshot_name_template = snapshot-%s
+
+max_gigabytes=10000
+volume_group=cinder-volumes
+
+volume_clear=zero
+volume_clear_size=10
+
+iscsi_ip_address={{ storage_controller_host }}
+iscsi_port=3260
+iscsi_helper=tgtadm
+
+volumes_dir=/var/lib/cinder/volumes
+
+volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = cinder
+admin_password = {{ CINDER_PASS }}
+
+[database]
+connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
diff --git a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
new file mode 100644
index 0000000..920f3d2
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list
@@ -0,0 +1 @@
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main
diff --git a/compass/deploy/ansible/roles/common/tasks/main.yml b/compass/deploy/ansible/roles/common/tasks/main.yml
new file mode 100644
index 0000000..ce595f5
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+- name: install ubuntu-cloud-keyring (ubuntu)
+ apt: name={{ item }} state=latest
+ with_items:
+ - ubuntu-cloud-keyring
+
+- name: update hosts file on all hosts
+ template: src=hosts
+ dest=/etc/hosts
+ backup=yes
+
+- name: install common packages
+ apt: name={{ item }} state=latest
+ with_items:
+ - python-pip
+ - python-dev
+ - python-mysqldb
+ - ntp
+
+- name: stop ntp, force a time sync and set the hardware clock
+ command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc"
+ ignore_errors: True
+
+- name: update ntp conf
+ template: src=ntp.conf dest=/etc/ntp.conf backup=yes
+
+- name: restart ntp
+ service: name=ntp state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/common/templates/hosts b/compass/deploy/ansible/roles/common/templates/hosts
new file mode 100644
index 0000000..9d27c0a
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/templates/hosts
@@ -0,0 +1,22 @@
+# compute-controller
+10.145.89.136 host-136
+# database
+10.145.89.136 host-136
+# messaging
+10.145.89.136 host-136
+# storage-controller
+10.145.89.138 host-138
+# image
+10.145.89.138 host-138
+# identity
+10.145.89.136 host-136
+# network-server
+10.145.89.138 host-138
+# dashboard
+10.145.89.136 host-136
+# storage-volume
+10.145.89.139 host-139
+# network-worker
+10.145.89.139 host-139
+# compute-worker
+10.145.89.137 host-137
diff --git a/compass/deploy/ansible/roles/common/templates/ntp.conf b/compass/deploy/ansible/roles/common/templates/ntp.conf
new file mode 100644
index 0000000..c613809
--- /dev/null
+++ b/compass/deploy/ansible/roles/common/templates/ntp.conf
@@ -0,0 +1,56 @@
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+# Specify one or more NTP servers.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+server {{ NTP_SERVER_LOCAL }}
+server 0.ubuntu.pool.ntp.org
+server 1.ubuntu.pool.ntp.org
+server 2.ubuntu.pool.ntp.org
+server 3.ubuntu.pool.ntp.org
+
+# Use Ubuntu's ntp server as a fallback.
+server ntp.ubuntu.com
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery
+restrict -6 default kod notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines. Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
diff --git a/compass/deploy/ansible/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/roles/dashboard/tasks/main.yml
new file mode 100644
index 0000000..465b996
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: install dashboard packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - apache2
+ - memcached
+ - libapache2-mod-wsgi
+ - openstack-dashboard
+
+- name: remove ubuntu theme
+ apt: name=openstack-dashboard-ubuntu-theme
+ state=absent
+
+## horizon configuration is already enabled in apache2/conf-enabled
+## by openstack-dashboard package deploy script.
+#- name: update dashboard conf
+# template: src=openstack-dashboard.conf
+# dest=/etc/apache2/sites-available/openstack-dashboard.conf
+# backup=yes
+
+- name: update horizon settings
+ template: src=local_settings.py
+ dest=/etc/openstack-dashboard/local_settings.py
+ backup=yes
+
+- name: restart apache2
+ service: name=apache2 state=restarted enabled=yes
+
+- name: restart memcached
+ service: name=memcached state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
new file mode 100644
index 0000000..87e06e3
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py
@@ -0,0 +1,511 @@
+import os
+
+from django.utils.translation import ugettext_lazy as _
+
+from openstack_dashboard import exceptions
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+# Required for Django 1.5.
+# If horizon is running in production (DEBUG is False), set this
+# with the list of host/domain names that the application can serve.
+# For more information see:
+# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
+#ALLOWED_HOSTS = ['horizon.example.com', ]
+
+# Set SSL proxy settings:
+# For Django 1.4+ pass this header from the proxy after terminating the SSL,
+# and don't forget to strip it from the client's request.
+# For more information see:
+# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
+# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
+
+# If Horizon is being served through SSL, then uncomment the following two
+# settings to better secure the cookies from security exploits
+#CSRF_COOKIE_SECURE = True
+#SESSION_COOKIE_SECURE = True
+
+# Overrides for OpenStack API versions. Use this setting to force the
+# OpenStack dashboard to use a specific API version for a given service API.
+# NOTE: The version should be formatted as it appears in the URL for the
+# service API. For example, The identity service APIs have inconsistent
+# use of the decimal point, so valid options would be "2.0" or "3".
+# OPENSTACK_API_VERSIONS = {
+# "identity": 3,
+# "volume": 2
+# }
+
+# Set this to True if running on multi-domain model. When this is enabled, it
+# will require user to enter the Domain name in addition to username for login.
+# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
+
+# Overrides the default domain used when running on single-domain model
+# with Keystone V3. All entities will be created in the default domain.
+# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
+
+# Set Console type:
+# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
+# CONSOLE_TYPE = "AUTO"
+
+# Default OpenStack Dashboard configuration.
+HORIZON_CONFIG = {
+ 'dashboards': ('project', 'admin', 'settings',),
+ 'default_dashboard': 'project',
+ 'user_home': 'openstack_dashboard.views.get_user_home',
+ 'ajax_queue_limit': 10,
+ 'auto_fade_alerts': {
+ 'delay': 3000,
+ 'fade_duration': 1500,
+ 'types': ['alert-success', 'alert-info']
+ },
+ 'help_url': "http://docs.openstack.org",
+ 'exceptions': {'recoverable': exceptions.RECOVERABLE,
+ 'not_found': exceptions.NOT_FOUND,
+ 'unauthorized': exceptions.UNAUTHORIZED},
+}
+
+# Specify a regular expression to validate user passwords.
+# HORIZON_CONFIG["password_validator"] = {
+# "regex": '.*',
+# "help_text": _("Your password does not meet the requirements.")
+# }
+
+# Disable simplified floating IP address management for deployments with
+# multiple floating IP pools or complex network requirements.
+# HORIZON_CONFIG["simple_ip_management"] = False
+
+# Turn off browser autocompletion for the login form if so desired.
+# HORIZON_CONFIG["password_autocomplete"] = "off"
+
+LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# Set custom secret key:
+# You can either set it to a specific value or you can let horizon generate a
+# default secret key that is unique on this machine, i.e. regardless of the
+# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
+# may be situations where you would want to set this explicitly, e.g. when
+# multiple dashboard instances are distributed on different machines (usually
+# behind a load-balancer). Either you have to make sure that a session gets all
+# requests routed to the same dashboard instance or you set the same SECRET_KEY
+# for all of them.
+from horizon.utils import secret_key
+SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
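+# NOTE: the secret_key import above is otherwise unused here; upstream horizon
+# examples derive a per-machine key instead of hardcoding one, e.g.:
+#   SECRET_KEY = secret_key.generate_or_read_from_file(
+#       os.path.join(LOCAL_PATH, '.secret_key_store'))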
+# We recommend you use memcached for development; otherwise after every reload
+# of the django development server, you will have to login again. To use
+# memcached set CACHES to something like
+CACHES = {
+ 'default': {
+ 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
+ 'LOCATION' : '127.0.0.1:11211',
+ }
+}
+
+#CACHES = {
+# 'default': {
+# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache'
+# }
+#}
+
+# Enable the Ubuntu theme if it is present.
+try:
+ from ubuntu_theme import *
+except ImportError:
+ pass
+
+# Default Ubuntu apache configuration uses /horizon as the application root.
+# Configure auth redirects here accordingly.
+LOGIN_URL='/horizon/auth/login/'
+LOGOUT_URL='/horizon/auth/logout/'
+LOGIN_REDIRECT_URL='/horizon'
+
+# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
+# offline compression by default. To enable online compression, install
+# the node-less package and enable the following option.
+COMPRESS_OFFLINE = True
+
+# By default, validation of the HTTP Host header is disabled. Production
+# installations should have this set accordingly. For more information
+# see https://docs.djangoproject.com/en/dev/ref/settings/.
+ALLOWED_HOSTS = ['{{ dashboard_host }}']
+
+# Send email to the console by default
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+# Or send them to /dev/null
+#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
+
+# Configure these for your outgoing email host
+# EMAIL_HOST = 'smtp.my-company.com'
+# EMAIL_PORT = 25
+# EMAIL_HOST_USER = 'djangomail'
+# EMAIL_HOST_PASSWORD = 'top-secret!'
+
+# For multiple regions uncomment this configuration, and add (endpoint, title).
+# AVAILABLE_REGIONS = [
+# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
+# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
+# ]
+
+OPENSTACK_HOST = "{{ HA_VIP }}"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
+
+# Disable SSL certificate checks (useful for self-signed certificates):
+# OPENSTACK_SSL_NO_VERIFY = True
+
+# The CA certificate to use to verify SSL connections
+# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
+
+# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
+# capabilities of the auth backend for Keystone.
+# If Keystone has been configured to use LDAP as the auth backend then set
+# can_edit_user to False and name to 'ldap'.
+#
+# TODO(tres): Remove these once Keystone has an API to identify auth backend.
+OPENSTACK_KEYSTONE_BACKEND = {
+ 'name': 'native',
+ 'can_edit_user': True,
+ 'can_edit_group': True,
+ 'can_edit_project': True,
+ 'can_edit_domain': True,
+ 'can_edit_role': True
+}
+
+#Setting this to True, will add a new "Retrieve Password" action on instance,
+#allowing Admin session password retrieval/decryption.
+#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
+
+# The Xen Hypervisor has the ability to set the mount point for volumes
+# attached to instances (other Hypervisors currently do not). Setting
+# can_set_mount_point to True will add the option to set the mount point
+# from the UI.
+OPENSTACK_HYPERVISOR_FEATURES = {
+ 'can_set_mount_point': False,
+ 'can_set_password': False,
+}
+
+# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
+# services provided by neutron. Options currently available are load
+# balancer service, security groups, quotas, VPN service.
+OPENSTACK_NEUTRON_NETWORK = {
+ 'enable_lb': False,
+ 'enable_firewall': False,
+ 'enable_quotas': True,
+ 'enable_vpn': False,
+ # The profile_support option is used to detect if an external router can be
+ # configured via the dashboard. When using specific plugins the
+ # profile_support can be turned on if needed.
+ 'profile_support': None,
+ #'profile_support': 'cisco',
+}
+
+# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
+# in the OpenStack Dashboard related to the Image service, such as the list
+# of supported image formats.
+# OPENSTACK_IMAGE_BACKEND = {
+# 'image_formats': [
+# ('', ''),
+# ('aki', _('AKI - Amazon Kernel Image')),
+# ('ami', _('AMI - Amazon Machine Image')),
+# ('ari', _('ARI - Amazon Ramdisk Image')),
+# ('iso', _('ISO - Optical Disk Image')),
+# ('qcow2', _('QCOW2 - QEMU Emulator')),
+# ('raw', _('Raw')),
+# ('vdi', _('VDI')),
+# ('vhd', _('VHD')),
+# ('vmdk', _('VMDK'))
+# ]
+# }
+
+# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
+# image custom property attributes that appear on image detail pages.
+IMAGE_CUSTOM_PROPERTY_TITLES = {
+ "architecture": _("Architecture"),
+ "kernel_id": _("Kernel ID"),
+ "ramdisk_id": _("Ramdisk ID"),
+ "image_state": _("Euca2ools state"),
+ "project_id": _("Project ID"),
+ "image_type": _("Image Type")
+}
+
+# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is 'publicURL'.
+#OPENSTACK_ENDPOINT_TYPE = "publicURL"
+
+# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
+# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
+# in the Keystone service catalog. Use this setting when Horizon is running
+# external to the OpenStack environment. The default is None. This
+# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
+#SECONDARY_ENDPOINT_TYPE = "publicURL"
+
+# The number of objects (Swift containers/objects or images) to display
+# on a single page before providing a paging element (a "more" link)
+# to paginate results.
+API_RESULT_LIMIT = 1000
+API_RESULT_PAGE_SIZE = 20
+
+# The timezone of the server. This should correspond with the timezone
+# of your entire OpenStack installation, and hopefully be in UTC.
+TIME_ZONE = "UTC"
+
+# When launching an instance, the menu of available flavors is
+# sorted by RAM usage, ascending. If you would like a different sort order,
+# you can provide another flavor attribute as sorting key. Alternatively, you
+# can provide a custom callback method to use for sorting. You can also provide
+# a flag for reverse sort. For more info, see
+# http://docs.python.org/2/library/functions.html#sorted
+# CREATE_INSTANCE_FLAVOR_SORT = {
+# 'key': 'name',
+# # or
+# 'key': my_awesome_callback_method,
+# 'reverse': False,
+# }
+
+# The Horizon Policy Enforcement engine uses these values to load per service
+# policy rule files. The content of these files should match the files the
+# OpenStack services are using to determine role based access control in the
+# target installation.
+
+# Path to directory containing policy.json files
+#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
+# Map of local copy of service policy files
+#POLICY_FILES = {
+# 'identity': 'keystone_policy.json',
+# 'compute': 'nova_policy.json',
+# 'volume': 'cinder_policy.json',
+# 'image': 'glance_policy.json',
+#}
+
+# Trove user and database extension support. By default support for
+# creating users and databases on database instances is turned on.
+# To disable these extensions set the permission here to something
+# unusable such as ["!"].
+# TROVE_ADD_USER_PERMS = []
+# TROVE_ADD_DATABASE_PERMS = []
+
+LOGGING = {
+ 'version': 1,
+ # When set to True this will disable all logging except
+ # for loggers specified in this configuration dictionary. Note that
+ # if nothing is specified here and disable_existing_loggers is True,
+ # django.db.backends will still log unless it is disabled explicitly.
+ 'disable_existing_loggers': False,
+ 'handlers': {
+ 'null': {
+ 'level': 'DEBUG',
+ 'class': 'django.utils.log.NullHandler',
+ },
+ 'console': {
+ # Set the level to "DEBUG" for verbose output logging.
+ 'level': 'INFO',
+ 'class': 'logging.StreamHandler',
+ },
+ },
+ 'loggers': {
+ # Logging from django.db.backends is VERY verbose, send to null
+ # by default.
+ 'django.db.backends': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'requests': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ 'horizon': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_dashboard': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'novaclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'cinderclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'keystoneclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'glanceclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'neutronclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'heatclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'ceilometerclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'troveclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'swiftclient': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'openstack_auth': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'nose.plugins.manager': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'django': {
+ 'handlers': ['console'],
+ 'level': 'DEBUG',
+ 'propagate': False,
+ },
+ 'iso8601': {
+ 'handlers': ['null'],
+ 'propagate': False,
+ },
+ }
+}
+
+# 'direction' should not be specified for all_tcp/udp/icmp.
+# It is specified in the form.
+SECURITY_GROUP_RULES = {
+ 'all_tcp': {
+ 'name': 'ALL TCP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_udp': {
+ 'name': 'ALL UDP',
+ 'ip_protocol': 'udp',
+ 'from_port': '1',
+ 'to_port': '65535',
+ },
+ 'all_icmp': {
+ 'name': 'ALL ICMP',
+ 'ip_protocol': 'icmp',
+ 'from_port': '-1',
+ 'to_port': '-1',
+ },
+ 'ssh': {
+ 'name': 'SSH',
+ 'ip_protocol': 'tcp',
+ 'from_port': '22',
+ 'to_port': '22',
+ },
+ 'smtp': {
+ 'name': 'SMTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '25',
+ 'to_port': '25',
+ },
+ 'dns': {
+ 'name': 'DNS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '53',
+ 'to_port': '53',
+ },
+ 'http': {
+ 'name': 'HTTP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '80',
+ 'to_port': '80',
+ },
+ 'pop3': {
+ 'name': 'POP3',
+ 'ip_protocol': 'tcp',
+ 'from_port': '110',
+ 'to_port': '110',
+ },
+ 'imap': {
+ 'name': 'IMAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '143',
+ 'to_port': '143',
+ },
+ 'ldap': {
+ 'name': 'LDAP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '389',
+ 'to_port': '389',
+ },
+ 'https': {
+ 'name': 'HTTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '443',
+ 'to_port': '443',
+ },
+ 'smtps': {
+ 'name': 'SMTPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '465',
+ 'to_port': '465',
+ },
+ 'imaps': {
+ 'name': 'IMAPS',
+ 'ip_protocol': 'tcp',
+ 'from_port': '993',
+ 'to_port': '993',
+ },
+ 'pop3s': {
+ 'name': 'POP3S',
+ 'ip_protocol': 'tcp',
+ 'from_port': '995',
+ 'to_port': '995',
+ },
+ 'ms_sql': {
+ 'name': 'MS SQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '1433',
+ 'to_port': '1433',
+ },
+ 'mysql': {
+ 'name': 'MYSQL',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3306',
+ 'to_port': '3306',
+ },
+ 'rdp': {
+ 'name': 'RDP',
+ 'ip_protocol': 'tcp',
+ 'from_port': '3389',
+ 'to_port': '3389',
+ },
+}
+
+FLAVOR_EXTRA_KEYS = {
+ 'flavor_keys': [
+ ('quota:read_bytes_sec', _('Quota: Read bytes')),
+ ('quota:write_bytes_sec', _('Quota: Write bytes')),
+ ('quota:cpu_quota', _('Quota: CPU')),
+ ('quota:cpu_period', _('Quota: CPU period')),
+ ('quota:inbound_average', _('Quota: Inbound average')),
+ ('quota:outbound_average', _('Quota: Outbound average')),
+ ]
+}
+
diff --git a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
new file mode 100644
index 0000000..a5a791a
--- /dev/null
+++ b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf
@@ -0,0 +1,14 @@
+<VirtualHost *:80>
+
+WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
+Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
+
+<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
+Order allow,deny
+Allow from all
+</Directory>
+
+
+</VirtualHost>
+
diff --git a/compass/deploy/ansible/roles/database/files/my.cnf b/compass/deploy/ansible/roles/database/files/my.cnf
new file mode 100644
index 0000000..d61f947
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/files/my.cnf
@@ -0,0 +1,131 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user = mysql
+pid-file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+lc-messages-dir = /usr/share/mysql
+skip-external-locking
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+bind-address = 0.0.0.0
+#
+# * Fine Tuning
+#
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+#max_connections = 100
+#table_cache = 64
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 1M
+query_cache_size = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+#general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+# other settings you may need to change.
+#server-id = 1
+#log_bin = /var/log/mysql/mysql-bin.log
+expire_logs_days = 10
+max_binlog_size = 100M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 16M
+
+[mysql]
+#no-auto-rehash # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+# The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
diff --git a/compass/deploy/ansible/roles/database/tasks/main.yml b/compass/deploy/ansible/roles/database/tasks/main.yml
new file mode 100644
index 0000000..e66f0cd
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: copy data.sh
+ template: src=data.j2 dest=/opt/data.sh mode=777
+ tags:
+ - mysql_user
+
+- include: mysql.yml
+ when: HA_CLUSTER is not defined
+
+- include: mariadb.yml
+ when: HA_CLUSTER is defined
+
diff --git a/compass/deploy/ansible/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
new file mode 100644
index 0000000..093dfd1
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/mariadb.yml
@@ -0,0 +1,46 @@
+---
+- name: install python-mysqldb
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - libaio1
+ - libssl0.9.8
+ #- mariadb-client-5.5
+ - mysql-client-5.5
+ - python-mysqldb
+ - mysql-server-wsrep
+ - galera
+
+- name: create mysql log directory
+ file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mariadb my.cnf
+ template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes
+
+- name: update galera wsrep.cnf
+ template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes
+
+- name: update wsrep_sst_rsync uid
+ lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes
+
+- name: update wsrep_sst_rsync gid
+ lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$" backup=yes
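+# the two lineinfile edits above strip the uid/gid lines that wsrep_sst_rsync
+# writes into its rsyncd config, so the SST rsync daemon keeps the invoking
+# user's identity (a common workaround for chown failures during state transfer)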
+
+- name: manually restart mysql server
+ service: name=mysql state=restarted enabled=yes
+ register: result
+ until: result|success
+ retries: 5
+ delay: 5
+ tags:
+ - mysql_restart
+
+- name: generate mysql service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - mysql
+
+- name: create database/user
+ shell: /opt/data.sh
+ when: HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - mysql_user
diff --git a/compass/deploy/ansible/roles/database/tasks/mysql.yml b/compass/deploy/ansible/roles/database/tasks/mysql.yml
new file mode 100644
index 0000000..327b656
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/tasks/mysql.yml
@@ -0,0 +1,22 @@
+---
+- name: install mysql client and server packages
+ apt: name={{ item }} state=present
+ with_items:
+ - python-mysqldb
+ - mysql-server
+
+- name: create mysql log directory
+ file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
+
+- name: update mysql my.cnf
+ copy: src=my.cnf
+ dest=/etc/mysql/my.cnf
+ backup=yes
+
+- name: manually restart mysql server
+ shell: service mysql restart
+
+- name: create database/user
+ shell: /opt/data.sh
+ tags:
+ - mysql_user
diff --git a/compass/deploy/ansible/roles/database/templates/data.j2 b/compass/deploy/ansible/roles/database/templates/data.j2
new file mode 100644
index 0000000..c894b32
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/data.j2
@@ -0,0 +1,39 @@
+#!/bin/sh
+mysql -uroot -Dmysql <<EOF
+drop database if exists keystone;
+drop database if exists glance;
+drop database if exists neutron;
+drop database if exists nova;
+drop database if exists cinder;
+
+CREATE DATABASE keystone;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON keystone.* TO 'keystone'@'{{ host }}' IDENTIFIED BY '{{ KEYSTONE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE glance;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON glance.* TO 'glance'@'{{ host }}' IDENTIFIED BY '{{ GLANCE_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE neutron;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON neutron.* TO 'neutron'@'{{ host }}' IDENTIFIED BY '{{ NEUTRON_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE nova;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON nova.* TO 'nova'@'{{ host }}' IDENTIFIED BY '{{ NOVA_DBPASS }}';
+{% endfor %}
+
+CREATE DATABASE cinder;
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON cinder.* TO 'cinder'@'{{ host }}' IDENTIFIED BY '{{ CINDER_DBPASS }}';
+{% endfor %}
+
+{% if WSREP_SST_USER is defined %}
+{% for host in ['%', 'localhost', inventory_hostname] %}
+GRANT ALL ON *.* TO '{{ WSREP_SST_USER }}'@'{{ host }}' IDENTIFIED BY '{{ WSREP_SST_PASS }}';
+{% endfor %}
+{% endif %}
+EOF
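+# For illustration, on a node whose inventory_hostname is host-136 the keystone
+# loop above renders to one grant per host entry:
+#   GRANT ALL ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '<KEYSTONE_DBPASS>';
+#   GRANT ALL ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '<KEYSTONE_DBPASS>';
+#   GRANT ALL ON keystone.* TO 'keystone'@'host-136' IDENTIFIED BY '<KEYSTONE_DBPASS>';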
diff --git a/compass/deploy/ansible/roles/database/templates/my.cnf b/compass/deploy/ansible/roles/database/templates/my.cnf
new file mode 100644
index 0000000..165d619
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/my.cnf
@@ -0,0 +1,134 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user = mysql
+pid-file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+lc-messages-dir = /usr/share/mysql
+skip-external-locking
+skip-name-resolve
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost which is more compatible and is not less secure.
+#bind-address = {{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address = {{ HA_VIP }}
+#
+# * Fine Tuning
+#
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+max_connections = 2000
+max_connect_errors = 8000
+#table_cache = 64
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 1M
+query_cache_size = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+#
+# Error log - should be very few entries.
+#
+log_error = /var/log/mysql/error.log
+#
+# Here you can see queries with especially long duration
+#log_slow_queries = /var/log/mysql/mysql-slow.log
+#long_query_time = 2
+#log-queries-not-using-indexes
+#
+# The following can be used as easy to replay backup logs or for replication.
+# note: if you are setting up a replication slave, see README.Debian about
+# other settings you may need to change.
+#server-id = 1
+#log_bin = /var/log/mysql/mysql-bin.log
+expire_logs_days = 10
+max_binlog_size = 100M
+#binlog_do_db = include_database_name
+#binlog_ignore_db = include_database_name
+#
+# * InnoDB
+#
+# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
+# Read the manual for more InnoDB related options. There are many!
+#
+# * Security Features
+#
+# Read the manual, too, if you want chroot!
+# chroot = /var/lib/mysql/
+#
+# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
+#
+# ssl-ca=/etc/mysql/cacert.pem
+# ssl-cert=/etc/mysql/server-cert.pem
+# ssl-key=/etc/mysql/server-key.pem
+default-storage-engine = innodb
+innodb_file_per_table
+collation-server = utf8_general_ci
+init-connect = 'SET NAMES utf8'
+character-set-server = utf8
+
+[mysqldump]
+quick
+quote-names
+max_allowed_packet = 16M
+
+[mysql]
+#no-auto-rehash # faster start of mysql but no tab completion
+
+[isamchk]
+key_buffer = 16M
+
+#
+# * IMPORTANT: Additional settings that can override those from this file!
+# The files must end with '.cnf', otherwise they'll be ignored.
+#
+!includedir /etc/mysql/conf.d/
+
diff --git a/compass/deploy/ansible/roles/database/templates/wsrep.cnf b/compass/deploy/ansible/roles/database/templates/wsrep.cnf
new file mode 100644
index 0000000..b9e9424
--- /dev/null
+++ b/compass/deploy/ansible/roles/database/templates/wsrep.cnf
@@ -0,0 +1,126 @@
+# This file contains wsrep-related mysqld options. It should be included
+# in the main MySQL configuration file.
+#
+# Options that need to be customized:
+# - wsrep_provider
+# - wsrep_cluster_address
+# - wsrep_sst_auth
+# The rest of defaults should work out of the box.
+
+##
+## mysqld options _MANDATORY_ for correct operation of the cluster
+##
+[mysqld]
+
+# (This must be substituted by wsrep_format)
+binlog_format=ROW
+
+# Currently only InnoDB storage engine is supported
+default-storage-engine=innodb
+
+# to avoid issues with 'bulk mode inserts' using autoinc
+innodb_autoinc_lock_mode=2
+
+# This is a must for parallel applying
+innodb_locks_unsafe_for_binlog=1
+
+# Query Cache is not supported with wsrep
+query_cache_size=0
+query_cache_type=0
+
+# Override bind-address
+# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
+# it will have (most likely) disastrous consequences on donor node
+#bind-address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+bind-address={{ HA_VIP }}
+
+##
+## WSREP options
+##
+
+# Full path to wsrep provider library or 'none'
+wsrep_provider=/usr/lib/galera/libgalera_smm.so
+
+# Provider specific configuration options
+#wsrep_provider_options=
+
+# Logical cluster name. Should be the same for all nodes.
+wsrep_cluster_name="my_wsrep_cluster"
+
+# Group communication system handle
+wsrep_cluster_address=gcomm://{{ HA_CLUSTER[inventory_hostname] }}
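+# NOTE: when HA_CLUSTER[inventory_hostname] renders empty this becomes the bare
+# gcomm:// address, which bootstraps a new cluster; the deploy uses an empty
+# entry to mark the first node (see the database role's data.sh guard)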
+
+# Human-readable node name (non-unique). Hostname by default.
+#wsrep_node_name=
+
+# Base replication <address|hostname>[:port] of the node.
+# The values supplied will be used as defaults for state transfer receiving,
+# listening ports and so on. Default: address of the first network interface.
+wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}
+
+# Address for incoming client connections. Autodetect by default.
+#wsrep_node_incoming_address=
+
+# How many threads will process writesets from other nodes
+wsrep_slave_threads=1
+
+# DBUG options for wsrep provider
+#wsrep_dbug_option
+
+# Generate fake primary keys for non-PK tables (required for multi-master
+# and parallel applying operation)
+wsrep_certify_nonPK=1
+
+# Maximum number of rows in write set
+wsrep_max_ws_rows=131072
+
+# Maximum size of write set
+wsrep_max_ws_size=1073741824
+
+# to enable debug level logging, set this to 1
+wsrep_debug=1
+
+# convert locking sessions into transactions
+wsrep_convert_LOCK_to_trx=0
+
+# how many times to retry deadlocked autocommits
+wsrep_retry_autocommit=1
+
+# change auto_increment_increment and auto_increment_offset automatically
+wsrep_auto_increment_control=1
+
+# retry autoinc insert, which failed for duplicate key error
+wsrep_drupal_282555_workaround=0
+
+# enable "strictly synchronous" semantics for read operations
+wsrep_causal_reads=0
+
+# Command to call when node status or cluster membership changes.
+# Will be passed all or some of the following options:
+# --status - new status of this node
+# --uuid - UUID of the cluster
+# --primary - whether the component is primary or not ("yes"/"no")
+# --members - comma-separated list of members
+# --index - index of this node in the list
+wsrep_notify_cmd=
+
+##
+## WSREP State Transfer options
+##
+
+# State Snapshot Transfer method
+wsrep_sst_method=rsync
+
+# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!!
+# (SST method dependent. Defaults to the first IP of the first interface)
+#wsrep_sst_receive_address=
+
+# SST authentication string. This will be used to send SST to joining nodes.
+# Depends on SST method. For mysqldump method it is root:<root password>
+wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }}
+
+# Desired SST donor name.
+#wsrep_sst_donor=
+
+# Protocol version to use
+# wsrep_protocol_version=
diff --git a/compass/deploy/ansible/roles/glance/handlers/main.yml b/compass/deploy/ansible/roles/glance/handlers/main.yml
new file mode 100644
index 0000000..d8eaa44
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: restart glance-api
+ service: name=glance-api state=restarted enabled=yes
+
+- name: restart glance-registry
+ service: name=glance-registry state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
new file mode 100644
index 0000000..28392a3
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml
@@ -0,0 +1,29 @@
+---
+- name: init glance db version
+ shell: glance-manage db_version_control 0
+
+- name: sync glance db
+ shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart glance-registry
+ - restart glance-api
+
+- meta: flush_handlers
+
+- name: place image upload script
+ template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
+
+- name: download cirros image file
+ get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }}
+
+- name: wait for 9292 port to become available
+ wait_for: host={{ image_host }} port=9292 delay=5
+
+- name: run image upload
+ shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? != 0 ] ;then sleep 5; else break;fi;done
+ args:
+ creates: image_upload_completed
diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
new file mode 100644
index 0000000..505b3b0
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml
@@ -0,0 +1,26 @@
+---
+- name: install glance packages
+ apt: name={{ item }} state=latest force=yes
+ with_items:
+ - glance
+ - python-glanceclient
+
+- name: generate glance service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - glance-registry
+ - glance-api
+
+- name: update glance conf
+ template: src={{ item }} dest=/etc/glance/{{ item }}
+ backup=yes
+ with_items:
+ - glance-api.conf
+ - glance-registry.conf
+ notify:
+ - restart glance-registry
+ - restart glance-api
+
+- name: remove default sqlite db
+ shell: rm /var/lib/glance/glance.sqlite || touch glance.sqlite.db.removed
+
diff --git a/compass/deploy/ansible/roles/glance/tasks/main.yml b/compass/deploy/ansible/roles/glance/tasks/main.yml
new file mode 100644
index 0000000..296f0dc
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+- include: glance_install.yml
+ tags:
+ - install
+ - glance_install
+ - glance
+
+- include: nfs.yml
+ tags:
+ - nfs
+
+- include: glance_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - glance_config
+ - glance
+
diff --git a/compass/deploy/ansible/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/roles/glance/tasks/nfs.yml
new file mode 100644
index 0000000..c03ab4d
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/tasks/nfs.yml
@@ -0,0 +1,41 @@
+---
+- name: get nfs server
+ local_action: shell /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:"
+ register: ip_info
+ run_once: True
+
+- name: install nfs
+ local_action: yum name=nfs-utils state=present
+ run_once: True
+
+- name: create image directory
+ local_action: file path=/opt/images state=directory mode=0777
+ run_once: True
+
+- name: update nfs config
+ local_action: lineinfile dest=/etc/exports state=present
+ regexp="/opt/images *(rw,insecure,sync,all_squash)"
+ line="/opt/images *(rw,insecure,sync,all_squash)"
+ run_once: True
+
+- name: restart nfs service
+ local_action: service name=nfs state=restarted enabled=yes
+ run_once: True
+
+- name: install nfs comm
+ apt: name=nfs-common state=present
+
+- name: get mount info
+ command: mount
+ register: mount_info
+
+- name: mount image directory
+ shell: |
+ mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images
+ sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab
+ echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab
+ when: mount_info.stdout.find('images') == -1
+ with_items:
+ ip_info.stdout_lines
+ retries: 5
+ delay: 3
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/roles/glance/templates/glance-api.conf
new file mode 100644
index 0000000..763539e
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/glance-api.conf
@@ -0,0 +1,677 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Which backend scheme should Glance use by default if one is not specified
+# in a request to add a new image to Glance? Known schemes are determined
+# by the known_stores option below.
+# Default: 'file'
+# "default_store" option has been moved to [glance_store] section in
+# Juno release
+
+# List of which store classes and store class locations are
+# currently known to glance at startup.
+# Existing but disabled stores:
+# glance.store.rbd.Store,
+# glance.store.s3.Store,
+# glance.store.swift.Store,
+# glance.store.sheepdog.Store,
+# glance.store.cinder.Store,
+# glance.store.gridfs.Store,
+# glance.store.vmware_datastore.Store,
+#known_stores = glance.store.filesystem.Store,
+# glance.store.http.Store
+
+
+# Maximum image size (in bytes) that may be uploaded through the
+# Glance API server. Defaults to 1 TB.
+# WARNING: this value should only be increased after careful consideration
+# and must be set to a value under 8 EB (9223372036854775808).
+#image_size_cap = 1099511627776
+
+# Address to bind the API server
+bind_host = {{ image_host }}
+
+# Port to bind the API server to
+bind_port = 9292
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/api.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package, it is also possible to use: glance.db.registry.api
+# data_api = glance.db.sqlalchemy.api
+
+# Number of Glance API worker processes to start.
+# On machines with more than one CPU increasing this value
+# may improve performance (especially if using SSL with
+# compression turned on). It is typically recommended to set
+# this value to the number of CPUs present on your machine.
+workers = 1
+
+# Maximum line size of message headers to be accepted.
+# max_header_line may need to be increased when using large tokens
+# (typically those generated by the Keystone v3 API with big service
+# catalogs)
+# max_header_line = 16384
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware.
+#allow_anonymous_access = False
+
+# Allow access to version 1 of glance api
+#enable_v1_api = True
+
+# Allow access to version 2 of glance api
+#enable_v2_api = True
+
+# Return the URL that references where the data is stored on
+# the backend storage system. For example, if using the
+# file system store a URL of 'file:///path/to/image' will
+# be returned to the user in the 'direct_url' meta-data field.
+# The default value is false.
+#show_image_direct_url = False
+
+# Send headers containing user and tenant information when making requests to
+# the v1 glance registry. This allows the registry to function as if a user is
+# authenticated without the need to authenticate a user itself using the
+# auth_token middleware.
+# The default value is false.
+#send_identity_headers = False
+
+# Supported values for the 'container_format' image attribute
+#container_formats=ami,ari,aki,bare,ovf,ova
+
+# Supported values for the 'disk_format' image attribute
+#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
+
+# Directory to use for lock files. Default to a temp directory
+# (string value). This setting needs to be the same for both
+# glance-scrubber and glance-api.
+#lock_path=<None>
+
+# Property Protections config file
+# This file contains the rules for property protections and the roles/policies
+# associated with it.
+# If this config value is not specified, by default, property protections
+# won't be enforced.
+# If a value is specified and the file is not found, then the glance-api
+# service will not start.
+#property_protection_file =
+
+# Specify whether 'roles' or 'policies' are used in the
+# property_protection_file.
+# The default value for property_protection_rule_format is 'roles'.
+#property_protection_rule_format = roles
+
+# Specifies how long (in hours) a task is supposed to live in the tasks DB
+# after succeeding or failing before getting soft-deleted.
+# The default value for task_time_to_live is 48 hours.
+# task_time_to_live = 48
+
+# This value sets what strategy will be used to determine the image location
+# order. Currently two strategies are packaged with Glance 'location_order'
+# and 'store_type'.
+#location_strategy = location_order
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL0
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting API server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Security Options ==========================
+
+# AES key for encrypting store 'location' metadata, including
+# -- if used -- Swift or S3 credentials
+# Should be set to a random string of length 16, 24 or 32 bytes
+#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
+
+# ============ Registry Options ===============================
+
+# Address to find the registry server
+registry_host = {{ internal_ip }}
+
+# Port the registry server is listening on
+registry_port = 9191
+
+# Protocol to use when connecting to the registry server.
+# Set to https for secure HTTP communication
+registry_client_protocol = http
+
+# The path to the key file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
+#registry_client_key_file = /path/to/key/file
+
+# The path to the cert file to use in SSL connections to the
+# registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
+#registry_client_cert_file = /path/to/cert/file
+
+# The path to the certifying authority cert file to use in SSL connections
+# to the registry server, if any. Alternately, you may set the
+# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
+#registry_client_ca_file = /path/to/ca/file
+
+# When using SSL in connections to the registry server, do not require
+# validation via a certifying authority. This is the registry's equivalent of
+# specifying --insecure on the command line using glanceclient for the API
+# Default: False
+#registry_client_insecure = False
+
+# The period of time, in seconds, that the API server will wait for a registry
+# request to complete. A value of '0' implies no timeout.
+# Default: 600
+#registry_client_timeout = 600
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# Pass the user's token through for API requests to the registry.
+# Default: True
+#use_user_token = True
+
+# If 'use_user_token' is not in effect then admin credentials
+# can be specified. Requests to the registry on behalf of
+# the API will use these credentials.
+# Admin user name
+#admin_user = None
+# Admin password
+#admin_password = None
+# Admin tenant name
+#admin_tenant_name = None
+# Keystone endpoint
+#auth_url = None
+# Keystone region
+#auth_region = None
+# Auth strategy
+#auth_strategy = keystone
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when images are created, updated or deleted.
+# There are four methods of sending notifications: logging (via the
+# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
+# message queue), or noop (no notifications sent, the default)
+# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver`
+# notifier_strategy = default
+
+# Driver or drivers to handle sending notifications
+# notification_driver = noop
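+# Illustrative sketch (not set by this template): to actually emit events
+# through the RabbitMQ settings below, the Juno-era oslo driver would be
+# notification_driver = messaging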
+
+# Default publisher_id for outgoing notifications.
+# default_publisher_id = image.localhost
+
+# Configuration options if sending notifications via rabbitmq (these are
+# the defaults)
+rabbit_host = localhost
+rabbit_port = 5672
+rabbit_use_ssl = false
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+rabbit_virtual_host = /
+rabbit_notification_exchange = glance
+rabbit_notification_topic = notifications
+rabbit_durable_queues = False
+
+# Configuration options if sending notifications via Qpid (these are
+# the defaults)
+qpid_notification_exchange = glance
+qpid_notification_topic = notifications
+qpid_hostname = localhost
+qpid_port = 5672
+qpid_username =
+qpid_password =
+qpid_sasl_mechanisms =
+qpid_reconnect_timeout = 0
+qpid_reconnect_limit = 0
+qpid_reconnect_interval_min = 0
+qpid_reconnect_interval_max = 0
+qpid_reconnect_interval = 0
+qpid_heartbeat = 5
+# Set to 'ssl' to enable SSL
+qpid_protocol = tcp
+qpid_tcp_nodelay = True
+
+# ============ Filesystem Store Options ========================
+
+# Directory that the Filesystem backend store
+# writes image data to
+# this option has been moved to [glance_store] for Juno release
+# filesystem_store_datadir = /var/lib/glance/images/
+
+# A list of directories where image data can be stored.
+# This option may be specified multiple times for specifying multiple store
+# directories. Either one of filesystem_store_datadirs or
+# filesystem_store_datadir option is required. A priority number may be given
+# after each directory entry, separated by a ":".
+# When adding an image, the highest priority directory will be selected, unless
+# there is not enough space available in cases where the image size is already
+# known. If no priority is given, it is assumed to be zero and the directory
+# will be considered for selection last. If multiple directories have the same
+# priority, then the one with the most free space available is selected.
+# If the same store is specified multiple times then a BadStoreConfiguration
+# exception will be raised.
+#filesystem_store_datadirs = /var/lib/glance/images/:1
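+# Illustrative example (hypothetical paths): prefer a fast disk (priority 2)
+# and fall back to a larger one (priority 1):
+#filesystem_store_datadirs = /mnt/ssd/glance/images/:2
+#filesystem_store_datadirs = /mnt/hdd/glance/images/:1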
+
+# A path to a JSON file that contains metadata describing the storage
+# system. When show_multiple_locations is True the information in this
+# file will be returned with any location that is contained in this
+# store.
+#filesystem_store_metadata_file = None
+
+# ============ Swift Store Options =============================
+
+# Version of the authentication service to use
+# Valid versions are '2' for keystone and '1' for swauth and rackspace
+swift_store_auth_version = 2
+
+# Address where the Swift authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'https://'
+# For swauth, use something like '127.0.0.1:8080/v1.0/'
+swift_store_auth_address = 127.0.0.1:5000/v2.0/
+
+# User to authenticate against the Swift authentication service
+# If you use Swift authentication service, set it to 'account':'user'
+# where 'account' is a Swift storage account and 'user'
+# is a user in that account
+swift_store_user = jdoe:jdoe
+
+# Auth key for the user authenticating against the
+# Swift authentication service
+swift_store_key = a86850deb2742ec3cb41518e26aa2d89
+
+# Container within the account that Glance should use
+# for storing images in Swift
+swift_store_container = glance
+
+# Do we create the container if it does not exist?
+swift_store_create_container_on_put = False
+
+# Size, in MB, at which Glance starts chunking image files
+# and doing a large object manifest in Swift. By default, this is
+# the maximum object size in Swift, which is 5GB
+swift_store_large_object_size = 5120
+
+# When doing a large object manifest, the size, in MB, of the chunks
+# Glance writes to Swift. This amount of data is written
+# to a temporary disk buffer during the process of chunking
+# the image file; the default is 200MB
+swift_store_large_object_chunk_size = 200
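+# Worked example with the values above: an 8 GB image exceeds the 5120 MB
+# threshold, so it is uploaded as 200 MB segments plus a manifest object.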
+
+# Whether to use ServiceNET to communicate with the Swift storage servers.
+# (If you aren't RACKSPACE, leave this False!)
+#
+# To use ServiceNET for authentication, prefix hostname of
+# `swift_store_auth_address` with 'snet-'.
+# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
+swift_enable_snet = False
+
+# If set to True enables multi-tenant storage mode which causes Glance images
+# to be stored in tenant specific Swift accounts.
+#swift_store_multi_tenant = False
+
+# A list of swift ACL strings that will be applied as both read and
+# write ACLs to the containers created by Glance in multi-tenant
+# mode. This grants the specified tenants/users read and write access
+# to all newly created image objects. The standard swift ACL string
+# formats are allowed, including:
+# <tenant_id>:<username>
+# <tenant_name>:<username>
+# *:<username>
+# Multiple ACLs can be combined using a comma separated list, for
+# example: swift_store_admin_tenants = service:glance,*:admin
+#swift_store_admin_tenants =
+
+# The region of the swift endpoint to be used for single tenant. This setting
+# is only necessary if the tenant has multiple swift endpoints.
+#swift_store_region =
+
+# If set to False, disables SSL layer compression of https swift requests.
+# Setting to 'False' may improve performance for images which are already
+# in a compressed format, eg qcow2. If set to True, enables SSL layer
+# compression (provided it is supported by the target swift proxy).
+#swift_store_ssl_compression = True
+
+# The number of times a Swift download will be retried before the
+# request fails
+#swift_store_retry_get_count = 0
+
+# ============ S3 Store Options =============================
+
+# Address where the S3 authentication service lives
+# Valid schemes are 'http://' and 'https://'
+# If no scheme specified, default to 'http://'
+s3_store_host = 127.0.0.1:8080/v1.0/
+
+# User to authenticate against the S3 authentication service
+s3_store_access_key = <20-char AWS access key>
+
+# Auth key for the user authenticating against the
+# S3 authentication service
+s3_store_secret_key = <40-char AWS secret key>
+
+# Bucket that Glance should use for storing images in S3.
+# Note that S3 has a flat namespace, so you need a unique bucket
+# name for your glance images. An easy way to do this is to
+# append your AWS access key to "glance".
+# S3 buckets in AWS *must* be lowercased, so remember to lowercase
+# your AWS access key if you use it in your bucket name below!
+s3_store_bucket = <lowercased 20-char aws access key>glance
+
+# Do we create the bucket if it does not exist?
+s3_store_create_bucket_on_put = False
+
+# When sending images to S3, the data will first be written to a
+# temporary buffer on disk. By default the platform's temporary directory
+# will be used. If required, an alternative directory can be specified here.
+#s3_store_object_buffer_dir = /path/to/dir
+
+# When forming a bucket url, boto will either set the bucket name as the
+# subdomain or as the first token of the path. Amazon's S3 service will
+# accept it as the subdomain, but Swift's S3 middleware requires it be
+# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
+#s3_store_bucket_url_format = subdomain
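+# Illustrative example: with 'subdomain', a bucket named "glance" on
+# s3.example.com is addressed as glance.s3.example.com; with 'path',
+# as s3.example.com/glance.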
+
+# ============ RBD Store Options =============================
+
+# Ceph configuration file path
+# If using cephx authentication, this file should
+# include a reference to the right keyring
+# in a client.<USER> section
+#rbd_store_ceph_conf = /etc/ceph/ceph.conf
+
+# RADOS user to authenticate as (only applicable if using cephx)
+# If <None>, a default will be chosen based on the client. section
+# in rbd_store_ceph_conf
+#rbd_store_user = <None>
+
+# RADOS pool in which images are stored
+#rbd_store_pool = images
+
+# RADOS images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+#rbd_store_chunk_size = 8
+
+# ============ Sheepdog Store Options =============================
+
+sheepdog_store_address = localhost
+
+sheepdog_store_port = 7000
+
+# Images will be chunked into objects of this size (in megabytes).
+# For best performance, this should be a power of two
+sheepdog_store_chunk_size = 64
+
+# ============ Cinder Store Options ===============================
+
+# Info to match when looking for cinder in the service catalog
+# Format is : separated values of the form:
+# <service_type>:<service_name>:<endpoint_type> (string value)
+#cinder_catalog_info = volume:cinder:publicURL
+
+# Override service catalog lookup with template for cinder endpoint
+# e.g. http://localhost:8776/v1/%(project_id)s (string value)
+#cinder_endpoint_template = <None>
+
+# Region name of this node (string value)
+#os_region_name = <None>
+
+# Location of ca certificates file to use for cinder client requests
+# (string value)
+#cinder_ca_certificates_file = <None>
+
+# Number of cinderclient retries on failed http calls (integer value)
+#cinder_http_retries = 3
+
+# Allow performing insecure SSL requests to cinder (boolean value)
+#cinder_api_insecure = False
+
+# ============ VMware Datastore Store Options =====================
+
+# ESX/ESXi or vCenter Server target system.
+# The server value can be an IP address or a DNS name
+# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
+#vmware_server_host = <None>
+
+# Server username (string value)
+#vmware_server_username = <None>
+
+# Server password (string value)
+#vmware_server_password = <None>
+
+# Inventory path to a datacenter (string value)
+# Value optional when vmware_server_host is an ESX/ESXi host: if specified,
+# it should be `ha-datacenter`.
+#vmware_datacenter_path = <None>
+
+# Datastore associated with the datacenter (string value)
+#vmware_datastore_name = <None>
+
+# The number of times we retry on failures
+# e.g., socket error, etc (integer value)
+#vmware_api_retry_count = 10
+
+# The interval used for polling remote tasks
+# invoked on VMware ESX/VC server in seconds (integer value)
+#vmware_task_poll_interval = 5
+
+# Absolute path of the folder containing the images in the datastore
+# (string value)
+#vmware_store_image_dir = /openstack_glance
+
+# Allow performing insecure SSL requests to the target system (boolean value)
+#vmware_api_insecure = False
+
+# ============ Delayed Delete Options =============================
+
+# Turn on/off delayed delete
+delayed_delete = False
+
+# Delayed delete time in seconds
+scrub_time = 43200
+
+# Directory that the scrubber will use to remind itself of what to delete
+# Make sure this is also set in glance-scrubber.conf
+scrubber_datadir = /var/lib/glance/scrubber
+
+# =============== Quota Options ==================================
+
+# The maximum number of image members allowed per image
+#image_member_quota = 128
+
+# The maximum number of image properties allowed per image
+#image_property_quota = 128
+
+# The maximum number of tags allowed per image
+#image_tag_quota = 128
+
+# The maximum number of locations allowed per image
+#image_location_quota = 10
+
+# Set a system wide quota for every user. This value is the total number
+# of bytes that a user can use across all storage systems. A value of
+# 0 means unlimited.
+#user_storage_quota = 0
+
+# =============== Image Cache Options =============================
+
+# Base directory that the Image Cache uses
+image_cache_dir = /var/lib/glance/image-cache/
+
+# =============== Manager Options =================================
+
+# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
+# Whether or not to enforce that all DB tables have charset utf8.
+# If your database tables do not have charset utf8 you will
+# need to convert before this option is removed. This option is
+# only relevant if your database engine is MySQL.
+#db_enforce_mysql_charset = True
+
+# =============== Glance Store ====================================
+[glance_store]
+# Moved from [DEFAULT], for Juno release
+default_store = file
+filesystem_store_datadir = /var/lib/glance/images/
+
+# =============== Database Options =================================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-api-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-api-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor= keystone
+
+[store_type_location_strategy]
+# The scheme list to use to get store preference order. The scheme must be
+# registered by one of the stores defined by the 'known_stores' config option.
+# This option will be applied when you using 'store_type' option as image
+# location strategy defined by the 'location_strategy' config option.
+#store_type_preference =
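+# Illustrative example (assuming both stores are enabled): try the RBD
+# store first, then fall back to the filesystem store:
+#store_type_preference = rbd,filesystem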
diff --git a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
new file mode 100644
index 0000000..8d731a2
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf
@@ -0,0 +1,190 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+#verbose = False
+
+# Show debugging output in logs (sets DEBUG log level output)
+#debug = False
+
+# Address to bind the registry server
+bind_host = {{ internal_ip }}
+
+# Port to bind the registry server to
+bind_port = 9191
+
+# Log to this file. Make sure you do not set the same log file for both the API
+# and registry servers!
+#
+# If `log_file` is omitted and `use_syslog` is false, then log messages are
+# sent to stdout as a fallback.
+log_file = /var/log/glance/registry.log
+
+# Backlog requests when creating socket
+backlog = 4096
+
+# TCP_KEEPIDLE value in seconds when creating socket.
+# Not supported on OS X.
+#tcp_keepidle = 600
+
+# API to use for accessing data. Default value points to sqlalchemy
+# package.
+#data_api = glance.db.sqlalchemy.api
+
+# Enable Registry API versions individually or simultaneously
+#enable_v1_registry = True
+#enable_v2_registry = True
+
+# Limit the api to return `api_limit_max` items in a call to a container. If
+# a larger `limit` query param is provided, it will be reduced to this value.
+api_limit_max = 1000
+
+# If a `limit` query param is not provided in an api request, it will
+# default to `limit_param_default`
+limit_param_default = 25
+
+# Role used to identify an authenticated user as administrator
+#admin_role = admin
+
+# Whether to automatically create the database tables.
+# Default: False
+#db_auto_create = False
+
+# Enable DEBUG log messages from sqlalchemy which prints every database
+# query and response.
+# Default: False
+#sqlalchemy_debug = True
+
+# ================= Syslog Options ============================
+
+# Send logs to syslog (/dev/log) instead of to file specified
+# by `log_file`
+#use_syslog = False
+
+# Facility to use. If unset defaults to LOG_USER.
+#syslog_log_facility = LOG_LOCAL1
+
+# ================= SSL Options ===============================
+
+# Certificate file to use when starting registry server securely
+#cert_file = /path/to/certfile
+
+# Private key file to use when starting registry server securely
+#key_file = /path/to/keyfile
+
+# CA certificate file to use to verify connecting clients
+#ca_file = /path/to/cafile
+
+# ================= Database Options ==========================
+
+[database]
+# The file name to use with SQLite (string value)
+sqlite_db = /var/lib/glance/glance.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous = True
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode = TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout = 3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = <None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = <None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor= keystone
diff --git a/compass/deploy/ansible/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/roles/glance/templates/image_upload.sh
new file mode 100644
index 0000000..9dd1fa8
--- /dev/null
+++ b/compass/deploy/ansible/roles/glance/templates/image_upload.sh
@@ -0,0 +1,2 @@
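+# Wait for glance-api to come up behind the VIP, then import the built-in
+# image; the completion marker keeps the import from running twice.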
+sleep 10
+glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
diff --git a/compass/deploy/ansible/roles/ha/files/galera_chk b/compass/deploy/ansible/roles/ha/files/galera_chk
new file mode 100644
index 0000000..9fd165c
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/galera_chk
@@ -0,0 +1,10 @@
+#! /bin/sh
+
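+# Report MySQL health over HTTP: print 200 when exactly one query thread is
+# running, 503 otherwise (consumed by xinetd/HAProxy health checks).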
+code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'`
+
+if [ "$code"=="1" ]
+then
+ echo "HTTP/1.1 200 OK\r\n"
+else
+ echo "HTTP/1.1 503 Service Unavailable\r\n"
+fi
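+
+# Illustrative manual probe (assumes xinetd already exposes this script on
+# port 9200): echo | nc <controller-ip> 9200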
diff --git a/compass/deploy/ansible/roles/ha/files/mysqlchk b/compass/deploy/ansible/roles/ha/files/mysqlchk
new file mode 100644
index 0000000..2c03f19
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/mysqlchk
@@ -0,0 +1,15 @@
+# default: off
+# description: An xinetd service that reports MySQL/Galera health to TCP
+# clients by running galera_chk on each connection.
+service mysqlchk
+{
+ disable = no
+ flags = REUSE
+ socket_type = stream
+ protocol = tcp
+ user = root
+ wait = no
+ server = /usr/local/bin/galera_chk
+ port = 9200
+}
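+# With this in place, a load balancer such as HAProxy can probe TCP port
+# 9200; the wrapped galera_chk script answers with an HTTP 200/503 line.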
diff --git a/compass/deploy/ansible/roles/ha/files/notify.sh b/compass/deploy/ansible/roles/ha/files/notify.sh
new file mode 100644
index 0000000..5edffe8
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/files/notify.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+python /usr/local/bin/failover.py $1
+mysql -uroot -e"flush hosts"
+service mysql restart
diff --git a/compass/deploy/ansible/roles/ha/handlers/main.yml b/compass/deploy/ansible/roles/ha/handlers/main.yml
new file mode 100644
index 0000000..a02c686
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/handlers/main.yml
@@ -0,0 +1,9 @@
+---
+- name: restart haproxy
+ service: name=haproxy state=restarted enabled=yes
+
+- name: restart xinetd
+ service: name=xinetd state=restarted enabled=yes
+
+- name: restart keepalived
+ service: name=keepalived state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/ha/tasks/main.yml b/compass/deploy/ansible/roles/ha/tasks/main.yml
new file mode 100644
index 0000000..a00c21a
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/tasks/main.yml
@@ -0,0 +1,94 @@
+---
+- name: install keepalived xinetd haproxy
+ apt: name={{ item }} state=present
+ with_items:
+ - keepalived
+ - xinetd
+ - haproxy
+
+- name: generate ha service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - keepalived
+ - xinetd
+ - haproxy
+
+- name: install pexpect
+ pip: name=pexpect state=present
+
+- name: activate ip_nonlocal_bind
+ sysctl: name=net.ipv4.ip_nonlocal_bind value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_intvl
+ sysctl: name=net.ipv4.tcp_keepalive_intvl value=1
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_probes
+ sysctl: name=net.ipv4.tcp_keepalive_probes value=5
+ state=present reload=yes
+
+- name: set net.ipv4.tcp_keepalive_time
+ sysctl: name=net.ipv4.tcp_keepalive_time value=5
+ state=present reload=yes
+
+- name: update haproxy cfg
+ template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg
+ notify: restart haproxy
+
+- name: set haproxy enable flag
+ lineinfile: dest=/etc/default/haproxy state=present
+ regexp="ENABLED=*"
+ line="ENABLED=1"
+ notify: restart haproxy
+
+- name: set haproxy log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local0.* /var/log/haproxy.log"
+ line="local0.* /var/log/haproxy.log"
+
+- name: set rsyslog udp module
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="^#$ModLoad imudp"
+ line="$ModLoad imudp"
+
+- name: set rsyslog udp port
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="^#$UDPServerRun 514"
+ line="$UDPServerRun 514"
+
+- name: copy galera_chk file
+ copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777
+
+- name: copy notify file
+ copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777
+
+- name: copy notify template file
+ template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777
+
+- name: add network service
+ lineinfile: dest=/etc/services state=present
+ line="mysqlchk 9200/tcp"
+ insertafter="Local services"
+ notify: restart xinetd
+
+- name: copy mysqlchk file
+ copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777
+ notify: restart xinetd
+
+- name: set keepalived start param
+ lineinfile: dest=/etc/default/keepalived state=present
+ regexp="^DAEMON_ARGS=*"
+ line="DAEMON_ARGS=\"-D -d -S 1\""
+
+- name: set keepalived log
+ lineinfile: dest=/etc/rsyslog.conf state=present
+ regexp="local1.* /var/log/keepalived.log"
+ line="local1.* /var/log/keepalived.log"
+
+- name: update keepalived info
+ template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf
+ notify: restart keepalived
+
+- name: restart rsyslog
+ shell: service rsyslog restart
diff --git a/compass/deploy/ansible/roles/ha/templates/failover.j2 b/compass/deploy/ansible/roles/ha/templates/failover.j2
new file mode 100644
index 0000000..b03c737
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/failover.j2
@@ -0,0 +1,65 @@
+import ConfigParser, os, socket
+import logging as LOG
+import pxssh
+import sys
+import re
+
+LOG_FILE="/var/log/mysql_failover"
+try:
+ os.remove(LOG_FILE)
+except:
+ pass
+
+LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG)
+ha_vip = "{{ HA_VIP }}"
+LOG.info("ha_vip: %s" % ha_vip)
+
+#ha_vip = "10.1.0.50"
+galera_path = '/etc/mysql/conf.d/wsrep.cnf'
+pattern = re.compile(r"gcomm://(?P<prev_ip>.*)")
+
+def ssh_get_hostname(ip):
+ try:
+ s = pxssh.pxssh()
+ s.login("%s" % ip, "root", "root")
+ s.sendline('hostname') # run a command
+ s.prompt() # match the prompt
+ result = s.before.strip() # everything output before the prompt
+ return result.split(os.linesep)[1]
+ except pxssh.ExceptionPxssh as e:
+ LOG.error("pxssh failed on login.")
+ raise
+
+def failover(mode):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ config.readfp(open(galera_path))
+ wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address")
+ wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"]
+
+ LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address)
+
+ if mode == "master":
+ # reset wsrep_cluster_address to empty so this node bootstraps the cluster
+ LOG.info("becoming master, setting wsrep_cluster_address to empty")
+ wsrep_cluster_address = ""
+
+ elif mode == "backup":
+ # refresh wsrep_cluster_address to master int ip
+ hostname = ssh_get_hostname(ha_vip)
+ wsrep_cluster_address = socket.gethostbyname(hostname)
+ LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip")
+
+ LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address)
+ wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address
+ config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address)
+ with open(galera_path, 'wb') as fp:
+ #config.write(sys.stdout)
+ config.write(fp)
+
+ os.system("service mysql restart")
+ LOG.info("failover success!!!")
+
+if __name__ == "__main__":
+ LOG.debug("call me: %s" % sys.argv)
+ failover(sys.argv[1])
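+
+# Invoked by keepalived state transitions via notify.sh, e.g.:
+#   python /usr/local/bin/failover.py master
+#   python /usr/local/bin/failover.py backup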
diff --git a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
new file mode 100644
index 0000000..4ed528a
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg
@@ -0,0 +1,133 @@
+
+global
+ #chroot /var/run/haproxy
+ daemon
+ user haproxy
+ group haproxy
+ maxconn 4000
+ pidfile /var/run/haproxy/haproxy.pid
+ #log 127.0.0.1 local0
+ tune.bufsize 1000000
+ stats socket /var/run/haproxy.sock
+ stats timeout 2m
+
+defaults
+ log global
+ maxconn 8000
+ option redispatch
+ option dontlognull
+ option splice-auto
+ timeout http-request 10s
+ timeout queue 1m
+ timeout connect 10s
+ timeout client 6m
+ timeout server 6m
+ timeout check 10s
+ retries 5
+
+listen proxy-glance_registry_cluster
+ bind {{ HA_VIP }}:9191
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-glance_api_cluster
+ bind {{ HA_VIP }}:9292
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova-novncproxy
+ bind {{ HA_VIP }}:6080
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-network
+ bind {{ HA_VIP }}:9696
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-volume
+ bind {{ HA_VIP }}:8776
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_admin_cluster
+ bind {{ HA_VIP }}:35357
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_public_internal_cluster
+ bind {{ HA_VIP }}:5000
+ option tcpka
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_compute_api_cluster
+ bind {{ HA_VIP }}:8774
+ mode tcp
+ option httpchk
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_metadata_api_cluster
+ bind {{ HA_VIP }}:8775
+ option tcpka
+ option tcplog
+ balance source
+{% for host in groups['controller'] %}
+ server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+ mode http
+ bind 0.0.0.0:8888
+ stats enable
+ stats refresh 30s
+ stats uri /
+ stats realm Global\ statistics
+ stats auth admin:admin
+
+
diff --git a/compass/deploy/ansible/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/roles/ha/templates/keepalived.conf
new file mode 100644
index 0000000..0b49137
--- /dev/null
+++ b/compass/deploy/ansible/roles/ha/templates/keepalived.conf
@@ -0,0 +1,42 @@
+global_defs {
+
+ notification_email {
+ root@huawei.com
+ }
+
+ notification_email_from keepalived@huawei.com
+
+ smtp_server localhost
+
+ smtp_connect_timeout 30
+
+ router_id NodeA
+
+}
+
+vrrp_instance VI_1 {
+
+ interface {{ INTERNAL_INTERFACE }}
+ virtual_router_id 51
+ state BACKUP
+ nopreempt
+ advert_int 1
+{% for host in groups['controller'] %}
+{% if host == inventory_hostname %}
+ priority {{ 100 - loop.index0 * 5 }}
+{% endif %}
+{% endfor %}
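+ ! Rendered result (illustrative): with three controllers the loop above
+ ! yields priorities 100, 95 and 90, so earlier hosts win the VRRP election.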
+
+ authentication {
+ auth_type PASS
+ auth_pass 1111
+ }
+
+ virtual_ipaddress {
+ {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }}
+ }
+
+ notify_master "/usr/local/bin/notify.sh master"
+ notify_backup "/usr/local/bin/notify.sh backup"
+}
+
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
new file mode 100644
index 0000000..3203b26
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml
@@ -0,0 +1,16 @@
+---
+- name: keystone-manage db-sync
+ shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+
+- name: place keystone init script under /opt/
+ template: src=keystone_init dest=/opt/keystone_init mode=0744
+
+- name: run keystone_init
+ shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed
+ args:
+ creates: keystone_init_complete
+
diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
new file mode 100644
index 0000000..e69c069
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml
@@ -0,0 +1,29 @@
+---
+- name: install keystone packages
+ apt: name=keystone state=present force=yes
+
+- name: generate keystone service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - keystone
+
+- name: update keystone conf
+ template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+
+- name: delete sqlite database
+ shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
+
+- name: cron job to purge expired tokens hourly
+ shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
+
+- name: modify keystone cron rights
+ file: path=/var/spool/cron/crontabs/keystone mode=0600
+
+- name: keystone source files
+ template: src={{ item }} dest=/opt/{{ item }}
+ with_items:
+ - admin-openrc.sh
+ - demo-openrc.sh
+
+- name: manually start keystone
+ service: name=keystone state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/keystone/tasks/main.yml b/compass/deploy/ansible/roles/keystone/tasks/main.yml
new file mode 100644
index 0000000..2f36e91
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: keystone_install.yml
+ tags:
+ - install
+ - keystone_install
+ - keystone
+
+- include: keystone_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - keystone_config
+ - keystone
diff --git a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
new file mode 100644
index 0000000..f2e0d61
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh
@@ -0,0 +1,6 @@
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+export OS_USERNAME=admin
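+# Usage sketch: `source /opt/admin-openrc.sh`, then e.g. `keystone user-list`
+# to verify the Identity Service.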
+
diff --git a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
new file mode 100644
index 0000000..8bdc51b
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
@@ -0,0 +1,5 @@
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ HA_VIP }}:5000/v2.0
+
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
new file mode 100644
index 0000000..fc8bf1f
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/keystone.conf
@@ -0,0 +1,1317 @@
+[DEFAULT]
+
+admin_token={{ ADMIN_TOKEN }}
+
+public_bind_host = {{ identity_host }}
+
+admin_bind_host = {{ identity_host }}
+
+#compute_port=8774
+
+#admin_port=35357
+
+#public_port=5000
+
+# The base public endpoint URL for keystone that is
+# advertised to clients (NOTE: this does NOT affect how
+# keystone listens for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:5000/v2.0/users will
+# default to http://server:5000. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#public_endpoint=http://localhost:%(public_port)s/
+
+# The base admin endpoint URL for keystone that is advertised
+# to clients (NOTE: this does NOT affect how keystone listens
+# for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:35357/v2.0/users will
+# default to http://server:35357. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#admin_endpoint=http://localhost:%(admin_port)s/
+
+# onready allows you to send a notification when the process
+# is ready to serve. For example, to have it notify using
+# systemd, one could set shell command: "onready = systemd-
+# notify --ready" or a module with notify() method: "onready =
+# keystone.common.systemd". (string value)
+#onready=<None>
+
+# Maximum request body size enforced by the optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# limit the sizes of user & tenant ID/names. (integer value)
+#max_param_size=64
+
+# similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, the member_role_id will be used in the API
+# add_user_to_project. (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, member_role_name will be ignored. (string
+# value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib encrypt
+# method. (integer value)
+#crypt_strength=40000
+
+# Set this to True if you want to enable TCP_KEEPALIVE on
+# server sockets i.e. sockets used by the keystone wsgi server
+# for client connections. (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is True. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection can be set with list_limit, with no limit set by
+# default. This global limit may be then overridden for a
+# specific driver, by specifying a list_limit in the
+# appropriate section (e.g. [assignment]). (integer value)
+#list_limit=<None>
+
+# Set this to false if you want to enable the ability for
+# user, group and project entities to be moved between domains
+# by updating their domain_id. Allowing such movement is not
+# recommended if the scope of a domain admin is being
+# restricted by use of an appropriate policy file (see
+# policy.v3cloudsample as an example). (boolean value)
+#domain_id_immutable=true
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+rabbit_userid={{ RABBIT_USER }}
+
+# The RabbitMQ password. (string value)
+rabbit_password={{ RABBIT_PASS }}
+
+# the RabbitMQ login method (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# If passed, use a fake RabbitMQ provider. (boolean value)
+#fake_rabbit=false
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=<None>
+
+# Size of RPC greenthread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications. (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
+
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+# Required if EC2 server requires client certificate. (string
+# value)
+#keystone_ec2_keyfile=<None>
+
+# Client certificate key filename. Required if EC2 server
+# requires client certificate. (string value)
+#keystone_ec2_certfile=<None>
+
+# A PEM encoded certificate authority to use when verifying
+# HTTPS connections. Defaults to the system CAs. (string
+# value)
+#keystone_ec2_cafile=<None>
+
+# Disable SSL certificate verification. (boolean value)
+#keystone_ec2_insecure=false
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+debug={{ DEBUG }}
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+verbose={{ VERBOSE }}
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
+
+# Publish error events (boolean value)
+#publish_errors=false
+
+# Make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+# The name of logging configuration file. It does not disable
+# existing loggers, but just appends specified logging
+# configuration to any other existing logging options. Please
+# see the Python logging module documentation for details on
+# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=<None>
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+log_dir = /var/log/keystone
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and then will be changed in J to honor RFC5424
+# (boolean value)
+#use_syslog=false
+
+# (Optional) Use syslog rfc5424 format for logging. If
+# enabled, will add APP-NAME (RFC5424) before the MSG part of
+# the syslog message. The old format without APP-NAME is
+# deprecated in I, and will be removed in J. (boolean value)
+#use_syslog_rfc_format=false
+
+# Syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
+
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Assignment backend driver. (string value)
+#driver=<None>
+
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
+
+# Maximum number of entities that will be returned in an
+# assignment collection. (integer value)
+#list_limit=<None>
+
+
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module. (string value)
+#password=keystone.auth.plugins.password.Password
+
+# The token auth plugin module. (string value)
+#token=keystone.auth.plugins.token.Token
+
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
+
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name. (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcache) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes; it
+# is highly recommended to always leave this set to True.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: "<argname>:<value>". (multi valued)
+#backend_argument=
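+# Example (hypothetical values, for the memcache backend recommended above):
+# backend_argument=url:127.0.0.1:11211
+# backend_argument=distributed_lock:True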
+
+# Proxy Classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. Comma delimited
+# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2.
+# (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism. (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls) This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# False. (boolean value)
+#debug_cache_backend=false
+
+
+[catalog]
+
+#
+# Options defined in keystone
+#
+
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
+
+# Keystone catalog backend driver. (string value)
+#driver=keystone.catalog.backends.sql.Catalog
+
+# Maximum number of entities that will be returned in a
+# catalog collection. (integer value)
+#list_limit=<None>
+
+
+[credential]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.credential.backends.sql.Credential
+
+
+[database]
+
+#
+# Options defined in keystone.openstack.common.db.options
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=keystone.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=<None>
+connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone
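+# (KEYSTONE_DBPASS and db_host above are Jinja2 placeholders that Ansible
+# fills in when this template is rendered.)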
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect=false
+
+# seconds between db connection retries (integer value)
+#db_retry_interval=1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval=true
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval=10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries=20
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# Keystone EC2Credential backend driver. (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Federation backend driver. (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
+# Value to be used when filtering assertion parameters from
+# the environment. (string value)
+#assertion_prefix=
+
+
+[identity]
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking
+# the v2 API. There is nothing special about this domain,
+# other than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
+# configuration file. This feature is disabled by default; set
+# to True to enable. (boolean value)
+#domain_specific_drivers_enabled=false
+
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
+
+# Keystone Identity backend driver. (string value)
+#driver=keystone.identity.backends.sql.Identity
+
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
+
+# Maximum number of entities that will be returned in an
+# identity collection. (integer value)
+#list_limit=<None>
+
+
+[kvs]
+
+#
+# Options defined in keystone
+#
+
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library. (list value)
+#backends=
+
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name. (string value)
+#config_prefix=keystone.kvs
+
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes; it is highly recommended to always leave this set
+# to True. (boolean value)
+#enable_key_mangler=true
+
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
+
+
+[ldap]
+
+#
+# Options defined in keystone
+#
+
+# URL for connecting to the LDAP server. (string value)
+#url=ldap://localhost
+
+# User BindDN to query the LDAP server. (string value)
+#user=<None>
+
+# Password for the BindDN to query the LDAP server. (string
+# value)
+#password=<None>
+
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
+
+# If true, will add a dummy member to groups. This is required
+# if the objectclass for groups requires the "member"
+# attribute. (boolean value)
+#use_dumb_member=false
+
+# DN of the "dummy member" to use when "use_dumb_member" is
+# enabled. (string value)
+#dumb_member=cn=dumb,dc=nonexistent
+
+# Allow deleting subtrees. (boolean value)
+#allow_subtree_delete=false
+
+# The LDAP scope for queries; this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree).
+# (string value)
+#query_scope=one
+
+# Maximum results per page; a value of zero ("0") disables
+# paging. (integer value)
+#page_size=0
+
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
+
+# Override the system's default referral chasing behavior for
+# queries. (boolean value)
+#chase_referrals=<None>
+
+# Search base for users. (string value)
+#user_tree_dn=<None>
+
+# LDAP search filter for users. (string value)
+#user_filter=<None>
+
+# LDAP objectClass for users. (string value)
+#user_objectclass=inetOrgPerson
+
+# LDAP attribute mapped to user id. (string value)
+#user_id_attribute=cn
+
+# LDAP attribute mapped to user name. (string value)
+#user_name_attribute=sn
+
+# LDAP attribute mapped to user email. (string value)
+#user_mail_attribute=email
+
+# LDAP attribute mapped to password. (string value)
+#user_pass_attribute=userPassword
+
+# LDAP attribute mapped to user enabled flag. (string value)
+#user_enabled_attribute=enabled
+
+# Bitmask integer to indicate the bit that the enabled value
+# is stored in if the LDAP server represents "enabled" as a
+# bit on an integer rather than a boolean. A value of "0"
+# indicates the mask is not used. If this is not set to "0"
+# the typical value is "2". This is typically used when
+# "user_enabled_attribute = userAccountControl". (integer
+# value)
+#user_enabled_mask=0
+
+# Default value to enable users. This should match an
+# appropriate int value if the LDAP server uses non-boolean
+# (bitmask) values to indicate if a user is enabled or
+# disabled. If this is not set to "True", the typical value is
+# "512". This is typically used when "user_enabled_attribute =
+# userAccountControl". (string value)
+#user_enabled_default=True
+
+# List of attributes stripped off the user on update. (list
+# value)
+#user_attribute_ignore=default_project_id,tenants
+
+# LDAP attribute mapped to default_project_id for users.
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# Allow user creation in LDAP backend. (boolean value)
+#user_allow_create=true
+
+# Allow user updates in LDAP backend. (boolean value)
+#user_allow_update=true
+
+# Allow user deletion in LDAP backend. (boolean value)
+#user_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a user is enabled or not by checking if they are a member of
+# the "user_enabled_emulation_dn" group. (boolean value)
+#user_enabled_emulation=false
+
+# DN of the group entry to hold enabled users when using
+# enabled emulation. (string value)
+#user_enabled_emulation_dn=<None>
+
+# List of additional LDAP attribute mappings for users. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#user_additional_attribute_mapping=
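+# Example (hypothetical): expose the LDAP "mail" attribute as the Identity
+# API "email" attribute:
+# user_additional_attribute_mapping = mail:email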
+
+# Search base for projects (string value)
+#tenant_tree_dn=<None>
+
+# LDAP search filter for projects. (string value)
+#tenant_filter=<None>
+
+# LDAP objectClass for projects. (string value)
+#tenant_objectclass=groupOfNames
+
+# LDAP attribute mapped to project id. (string value)
+#tenant_id_attribute=cn
+
+# LDAP attribute mapped to project membership for user.
+# (string value)
+#tenant_member_attribute=member
+
+# LDAP attribute mapped to project name. (string value)
+#tenant_name_attribute=ou
+
+# LDAP attribute mapped to project description. (string value)
+#tenant_desc_attribute=description
+
+# LDAP attribute mapped to project enabled. (string value)
+#tenant_enabled_attribute=enabled
+
+# LDAP attribute mapped to project domain_id. (string value)
+#tenant_domain_id_attribute=businessCategory
+
+# List of attributes stripped off the project on update. (list
+# value)
+#tenant_attribute_ignore=
+
+# Allow tenant creation in LDAP backend. (boolean value)
+#tenant_allow_create=true
+
+# Allow tenant update in LDAP backend. (boolean value)
+#tenant_allow_update=true
+
+# Allow tenant deletion in LDAP backend. (boolean value)
+#tenant_allow_delete=true
+
+# If True, Keystone uses an alternative method to determine if
+# a project is enabled or not by checking if they are a member
+# of the "tenant_enabled_emulation_dn" group. (boolean value)
+#tenant_enabled_emulation=false
+
+# DN of the group entry to hold enabled projects when using
+# enabled emulation. (string value)
+#tenant_enabled_emulation_dn=<None>
+
+# Additional attribute mappings for projects. Attribute
+# mapping format is <ldap_attr>:<user_attr>, where ldap_attr
+# is the attribute in the LDAP entry and user_attr is the
+# Identity API attribute. (list value)
+#tenant_additional_attribute_mapping=
+
+# Search base for roles. (string value)
+#role_tree_dn=<None>
+
+# LDAP search filter for roles. (string value)
+#role_filter=<None>
+
+# LDAP objectClass for roles. (string value)
+#role_objectclass=organizationalRole
+
+# LDAP attribute mapped to role id. (string value)
+#role_id_attribute=cn
+
+# LDAP attribute mapped to role name. (string value)
+#role_name_attribute=ou
+
+# LDAP attribute mapped to role membership. (string value)
+#role_member_attribute=roleOccupant
+
+# List of attributes stripped off the role on update. (list
+# value)
+#role_attribute_ignore=
+
+# Allow role creation in LDAP backend. (boolean value)
+#role_allow_create=true
+
+# Allow role update in LDAP backend. (boolean value)
+#role_allow_update=true
+
+# Allow role deletion in LDAP backend. (boolean value)
+#role_allow_delete=true
+
+# Additional attribute mappings for roles. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#role_additional_attribute_mapping=
+
+# Search base for groups. (string value)
+#group_tree_dn=<None>
+
+# LDAP search filter for groups. (string value)
+#group_filter=<None>
+
+# LDAP objectClass for groups. (string value)
+#group_objectclass=groupOfNames
+
+# LDAP attribute mapped to group id. (string value)
+#group_id_attribute=cn
+
+# LDAP attribute mapped to group name. (string value)
+#group_name_attribute=ou
+
+# LDAP attribute mapped to show group membership. (string
+# value)
+#group_member_attribute=member
+
+# LDAP attribute mapped to group description. (string value)
+#group_desc_attribute=description
+
+# List of attributes stripped off the group on update. (list
+# value)
+#group_attribute_ignore=
+
+# Allow group creation in LDAP backend. (boolean value)
+#group_allow_create=true
+
+# Allow group update in LDAP backend. (boolean value)
+#group_allow_update=true
+
+# Allow group deletion in LDAP backend. (boolean value)
+#group_allow_delete=true
+
+# Additional attribute mappings for groups. Attribute mapping
+# format is <ldap_attr>:<user_attr>, where ldap_attr is the
+# attribute in the LDAP entry and user_attr is the Identity
+# API attribute. (list value)
+#group_additional_attribute_mapping=
+
+# CA certificate file path for communicating with LDAP
+# servers. (string value)
+#tls_cacertfile=<None>
+
+# CA certificate directory path for communicating with LDAP
+# servers. (string value)
+#tls_cacertdir=<None>
+
+# Enable TLS for communicating with LDAP servers. (boolean
+# value)
+#use_tls=false
+
+# Valid options for tls_req_cert are demand, never, and allow.
+# (string value)
+#tls_req_cert=demand
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[memcache]
+
+#
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port" (list value)
+#servers=localhost:11211
+
+# Number of compare-and-set attempts to make when using
+# compare-and-set in the token memcache back end. (integer
+# value)
+#max_compare_and_set_retry=16
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Credential backend driver. (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token. (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token. (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# role-assignment inheritance to projects from owning domain
+# can be optionally enabled. (boolean value)
+#enabled=false
+
+
+[paste_deploy]
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines. (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Policy backend driver. (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection. (integer value)
+#list_limit=<None>
+
+
+[revoke]
+
+#
+# Options defined in keystone
+#
+
+# An implementation of the backend for persisting revocation
+# events. (string value)
+#driver=keystone.contrib.revoke.backends.kvs.Revoke
+
+# This value (calculated in seconds) is added to token
+# expiration before a revocation event may be removed from the
+# backend. (integer value)
+#expiration_buffer=1800
+
+# Toggle for revocation event caching. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching=true
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section.
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA Key for token signing. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key Size (in bits) for token signing cert (auto generated
+# certificate). (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate). (integer value)
+#valid_days=3650
+
+# Certificate Subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL. (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Require client certificate. (boolean value)
+#cert_required=false
+
+# SSL Key Length (in bits) (auto generated certificate).
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate). (integer value)
+#valid_days=3650
+
+# SSL Certificate Subject (auto generated certificate).
+# (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Keystone stats backend driver. (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# token e.g. kerberos, x509. (list value)
+#bind=
+
+# Enforcement policy on tokens presented to keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode e.g. kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pki|uuid].Provider". (string
+# value)
+provider=keystone.token.providers.uuid.Provider
+
+# Keystone Token persistence backend driver. (string value)
+driver=keystone.token.persistence.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+revocation_cache_time=3600
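+# (provider, driver and revocation_cache_time above are this deployment's
+# explicit choices: UUID tokens persisted in SQL, with the revocation list
+# cached for one hour.)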
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# True enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if
+# you are switching to using the Revoke extension with a
+# backend other than KVS, which stores events in memory.
+# (boolean value)
+#revoke_by_id=true
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Keystone Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
+[extra_headers]
+Distribution = Ubuntu
+
diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone_init b/compass/deploy/ansible/roles/keystone/templates/keystone_init
new file mode 100644
index 0000000..729669b
--- /dev/null
+++ b/compass/deploy/ansible/roles/keystone/templates/keystone_init
@@ -0,0 +1,43 @@
+# create an administrative user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
+
+# create a normal user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=DEMO_EMAIL
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
+
+# create a service tenant
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
+
+# register keystone
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0
+
+# Create a glance user that the Image Service can use to authenticate with the Identity service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
+
+#Register the Image Service with the Identity service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292
+
+#Create a nova user that Compute uses to authenticate with the Identity Service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
+
+# register Compute with the Identity Service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s
+
+# register neutron user, role and service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696
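+
+# Hypothetical sanity check (not part of the original template): list what was
+# just registered, reusing the same bootstrap token and endpoint.
+# keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list
+# keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-list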
diff --git a/compass/deploy/ansible/roles/monitor/files/check_service.sh b/compass/deploy/ansible/roles/monitor/files/check_service.sh
new file mode 100644
index 0000000..d309673
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/files/check_service.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Restart any service listed in /opt/service that Upstart reports as stopped.
+services=`cat /opt/service | uniq`
+for service in $services; do
+  if /sbin/initctl list | awk '/stop\/waiting/{print $1}' | uniq | grep -q "$service"; then
+    /sbin/start $service
+  fi
+done
diff --git a/compass/deploy/ansible/roles/monitor/files/root b/compass/deploy/ansible/roles/monitor/files/root
new file mode 100644
index 0000000..9c55c4f
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/files/root
@@ -0,0 +1 @@
+* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1
diff --git a/compass/deploy/ansible/roles/monitor/tasks/main.yml b/compass/deploy/ansible/roles/monitor/tasks/main.yml
new file mode 100644
index 0000000..e5b93f3
--- /dev/null
+++ b/compass/deploy/ansible/roles/monitor/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: copy service check file
+ copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777
+
+- name: copy cron file
+ copy: src=root dest=/var/spool/cron/crontabs/root mode=0600
+
+- name: restart cron
+ service: name=cron state=restarted
+
+
diff --git a/compass/deploy/ansible/roles/mq/tasks/main.yml b/compass/deploy/ansible/roles/mq/tasks/main.yml
new file mode 100644
index 0000000..4ae4065
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- include: rabbitmq.yml
+
+#- include: rabbitmq_cluster.yml
+# when: HA_CLUSTER is defined
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
new file mode 100644
index 0000000..5714406
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml
@@ -0,0 +1,45 @@
+---
+- name: create rabbitmq directory
+ file: path=/etc/rabbitmq state=directory mode=0755
+
+- name: copy rabbitmq config file
+ template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755
+
+- name: install rabbitmq-server
+ apt: name=rabbitmq-server state=present
+
+- name: stop rabbitmq-server
+ service: name=rabbitmq-server
+ state=stopped
+
+- name: update .erlang.cookie
+ template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie
+ group=rabbitmq
+ owner=rabbitmq
+ mode=0400
+ when: ERLANG_TOKEN is defined
+
+- name: start and enable rabbitmq-server
+ service: name=rabbitmq-server
+ state=started
+ enabled=yes
+
+- name: generate mq service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - rabbitmq-server
+
+- name: modify rabbitmq password
+ command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
+ when: "RABBIT_USER is defined and RABBIT_USER == 'guest'"
+ ignore_errors: True
+
+- name: add rabbitmq user
+ command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }}
+ when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+ ignore_errors: True
+
+- name: set rabbitmq user permission
+ command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*"
+ when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
+
diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
new file mode 100644
index 0000000..afd4c77
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml
@@ -0,0 +1,27 @@
+---
+- name: stop rabbitmq app
+ command: rabbitmqctl stop_app
+ when: HA_CLUSTER[inventory_hostname] != ''
+
+- name: rabbitmqctl reset
+ command: rabbitmqctl reset
+ when: HA_CLUSTER[inventory_hostname] != ''
+
+- name: stop rabbitmq
+ shell: rabbitmqctl stop
+
+- name: set detach
+ shell: rabbitmq-server -detached
+
+- name: join cluster
+ command: rabbitmqctl join_cluster rabbit@{{ item }}
+ when: item != inventory_hostname and HA_CLUSTER[item] == ''
+ with_items:
+ groups['controller']
+
+- name: start rabbitmq app
+ command: rabbitmqctl start_app
+
+- name: set the HA policy
+ rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all"
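+
+# Note: only nodes whose HA_CLUSTER entry is non-empty reset and rejoin; the
+# node with an empty entry acts as the seed that the others join via
+# "rabbitmqctl join_cluster".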
+
diff --git a/compass/deploy/ansible/roles/mq/templates/.erlang.cookie b/compass/deploy/ansible/roles/mq/templates/.erlang.cookie
new file mode 100644
index 0000000..cadcfaf
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/templates/.erlang.cookie
@@ -0,0 +1 @@
+{{ ERLANG_TOKEN }}
diff --git a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
new file mode 100644
index 0000000..6dd7349
--- /dev/null
+++ b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf
@@ -0,0 +1 @@
+RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }}
diff --git a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
new file mode 100644
index 0000000..36d779d
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
new file mode 100644
index 0000000..825178b
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+neutron_ovs_bridge_mappings: ""
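+# Example (hypothetical override): map the external physical network to br-ex,
+# matching the default used in ml2_conf.ini:
+# neutron_ovs_bridge_mappings: "external:br-ex"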
diff --git a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
new file mode 100644
index 0000000..36d779d
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml
@@ -0,0 +1,13 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
new file mode 100644
index 0000000..93ee46f
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: install compute-related neutron packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-common
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+
+- name: generate neutron compute service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - neutron-plugin-openvswitch-agent
+
+- name: install neutron openvswitch agent
+ apt: name=neutron-plugin-openvswitch-agent
+ state=present force=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+ template: src=neutron-network.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- name: config ml2 plugin
+ template: src=ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- name: add br-int
+ openvswitch_bridge: bridge=br-int state=present
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+ - restart nova-compute
+
+- include: ../../neutron-network/tasks/odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- meta: flush_handlers
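+
+# flush_handlers forces any handlers notified above (the neutron agent and
+# nova-compute restarts) to run now rather than at the end of the play.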
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
new file mode 100644
index 0000000..19eb62e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
new file mode 100644
index 0000000..7bcbd9d
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
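+# (DHCP option 26 above forces the instance interface MTU to 1454 bytes,
+# leaving headroom for GRE/VXLAN encapsulation on a 1500-byte physical
+# network.)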
+
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
new file mode 100644
index 0000000..32caf96
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
new file mode 100644
index 0000000..b394c00
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set as False then the agent can only configure one router.
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# empty value for the linux bridge. when this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
new file mode 100644
index 0000000..6badf28
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
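+# (odl_username, odl_password, controller and odl_api_port are Jinja2
+# variables expected from the Ansible inventory when OpenDaylight is among
+# the mechanism drivers.)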
+
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
new file mode 100644
index 0000000..93be9cb
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return only pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back-end, a health monitor is a
+# resource consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
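Every {{ ... }} placeholder in the template above must resolve from the play's variables (the deploy keeps them under group_vars). A minimal sketch of the variables this file consumes — the names come from the template, the values below are illustrative assumptions only:

# group_vars sketch; values are placeholders, not the deploy's defaults
VERBOSE: 'True'
DEBUG: 'False'
network_server_host: 10.1.0.50
rabbit_host: 10.1.0.50
RABBIT_USER: openstack
RABBIT_PASS: secret
HA_VIP: 10.1.0.222
NOVA_PASS: secret
NEUTRON_PASS: secret
NEUTRON_DBPASS: secret
db_host: 10.1.0.50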
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
new file mode 100644
index 0000000..1575367
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return only pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back-end, a health monitor is a
+# resource consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is a multiline option; example for a default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
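The two commands above ship commented out. If the external network were created from Ansible instead, one hedged sketch is to wrap the same CLI in a shell task; run_once is an assumption here, to keep the network from being created once per host:

- name: create external network (sketch of the first command above)
  shell: >
    neutron --os-username=admin --os-password={{ ADMIN_PASS }}
    --os-tenant-name=admin
    --os-auth-url=http://{{ identity_host }}:35357/v2.0
    net-create ext-net --shared --router:external=True
  run_once: true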
diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
new file mode 100644
index 0000000..4988cb0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal=false
+vif_plugging_timeout=0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
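Two details worth noting in this template: notification_driver is given twice on purpose — oslo.config accumulates repeated keys of multi-valued options, so both the nova and ceilometer notifiers are loaded. The compute role's task file is not part of this hunk; a sketch of how the template would typically be installed, mirroring the neutron-controller role later in this patch (the task and handler names here are assumptions):

- name: update nova conf
  template: src=nova.conf dest=/etc/nova/nova.conf backup=yes
  notify:
    - restart nova-compute

# assumed handler, following the pattern of handlers/main.yml below:
# - name: restart nova-compute
#   service: name=nova-compute state=restarted enabled=yes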
diff --git a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
new file mode 100644
index 0000000..b4c1585
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+ service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+ service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+ service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+ service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+ service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+ service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+ service: name=neutron-server state=restarted enabled=yes
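These handlers fire only when a task notifies them, and Ansible runs each notified handler once at the end of the play no matter how many tasks triggered it. The neutron_install.yml task later in this patch is exactly such a trigger:

- name: update neutron conf
  template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
  notify:
    - restart neutron-server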
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
new file mode 100644
index 0000000..9c04d74
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: neutron_install.yml
+ tags:
+ - install
+ - neutron_install
+ - neutron
+
+- include: neutron_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - neutron_config
+ - neutron
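The when: guard runs neutron_config only where HA_CLUSTER is undefined or this host's entry is the empty string, so the database migration executes on at most one cluster member. The shape of HA_CLUSTER is not visible in this hunk; a hedged sketch of what the expression implies (hostnames and values are assumptions):

# group_vars sketch — structure inferred from the 'when:' expression only
HA_CLUSTER:
  host1: ''            # empty entry: neutron_config runs here
  host2: 10.1.0.222    # non-empty entry: neutron_config is skipped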
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
new file mode 100644
index 0000000..77cc29a
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml
@@ -0,0 +1,10 @@
+---
+- name: neutron-db-manage upgrade to Juno
+ shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart neutron-server
+
diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
new file mode 100644
index 0000000..6165299
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -0,0 +1,29 @@
+---
+- name: install controller-related neutron packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-server
+ - neutron-plugin-ml2
+
+- name: generate neutron control service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - neutron-server
+ - neutron-plugin-ml2
+
+- name: get tenant id to fill neutron.conf
+ shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+ register: NOVA_ADMIN_TENANT_ID
+
+- name: update neutron conf
+ template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-server
+
+- name: update ml2 plugin conf
+ template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
+ notify:
+ - restart neutron-server
+
+- meta: flush_handlers
+
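The keystone task above registers its whole shell result, and the neutron.conf template reads NOVA_ADMIN_TENANT_ID.stdout_lines[0] — the first line the grep/awk pipeline printed, i.e. the service tenant's id. A minimal sketch for inspecting the registered value (the debug task is purely illustrative):

- name: show the tenant id that will be templated into neutron.conf
  debug: msg="nova_admin_tenant_id={{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}"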
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
new file mode 100644
index 0000000..19eb62e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
new file mode 100644
index 0000000..7bcbd9d
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
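dhcp-option-force=26,1454 makes dnsmasq advertise DHCP option 26 (interface MTU) with a value of 1454, leaving headroom under the usual 1500-byte Ethernet MTU for GRE/VXLAN encapsulation of tenant traffic. dhcp_agent.ini above references this file through dnsmasq_config_file; a hedged sketch of deploying the pair together (task and handler names are assumptions):

- name: update dhcp agent conf
  template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini backup=yes
  notify:
    - restart neutron-dhcp-agent

- name: install dnsmasq overrides referenced by dhcp_agent.ini
  template: src=dnsmasq-neutron.conf dest=/etc/neutron/dnsmasq-neutron.conf
  notify:
    - restart neutron-dhcp-agent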
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
new file mode 100644
index 0000000..32caf96
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
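The interface name in this XORP template comes from Ansible facts: hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] dereferences the fact named by neutron_vxlan_interface (falling back to internal_interface) and reads its device field. A hedged sketch of variables that would satisfy the lookup — the names come from the template, the values are assumptions:

# group_vars sketch; assumes interface facts (dicts with a 'device' key)
internal_interface: ansible_eth1          # fact name to dereference
# neutron_vxlan_interface: ansible_eth2   # optional override
# rendered: hostvars[host]['ansible_eth1']['device'] -> 'eth1'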
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
new file mode 100644
index 0000000..b394c00
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple
+# external networks, both the external_network_bridge and
+# gateway_external_network_id must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
new file mode 100644
index 0000000..6badf28
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
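As the comment above states, the secret must match what nova is configured with, under the differently named key neutron_metadata_proxy_shared_secret. A hedged sketch of setting the nova side with Ansible's ini_file module — the [DEFAULT] placement and the service_neutron_metadata_proxy flag reflect Juno-era nova and are assumptions here:

- name: set the shared secret on the nova side (sketch)
  ini_file: dest=/etc/nova/nova.conf section=DEFAULT
            option=neutron_metadata_proxy_shared_secret value={{ METADATA_SECRET }}

- name: enable metadata proxying in nova (assumed Juno option name)
  ini_file: dest=/etc/nova/nova.conf section=DEFAULT
            option=service_neutron_metadata_proxy value=True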
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
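The |join(",") filters above imply that NEUTRON_TYPE_DRIVERS and its siblings are lists, flattened here into the comma-separated strings ml2 expects. A hedged sketch of matching group_vars — names from the template, values illustrative:

# group_vars sketch; values are examples, not the deploy's defaults
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']
# rendered: type_drivers = flat,gre,vxlan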
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
new file mode 100644
index 0000000..93be9cb
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, i.e.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum number of retries when trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, i.e.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. The value
+# 'infinite' or a negative value means no maximum limit; otherwise the value
+# must be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to the first DHCP agent which sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to the first L3 agent which sends a sync_routers message to the neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on back-end, a health monitor is a resource
+# consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
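In the template above, report_interval = 30 in [agent] works together with agent_down_time = 75 in [DEFAULT]: per the inline comments, agents must report at no more than half the down-time or the server may mark them down spuriously. A quick sanity check of the pairing used here, as a sketch:

    # Check the timing relationship the config comments require.
    agent_down_time = 75   # seconds before the server considers an agent down
    report_interval = 30   # seconds between agent state reports
    assert report_interval <= agent_down_time / 2, "agents would flap as down"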
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
new file mode 100644
index 0000000..2a66e94
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, i.e.: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum number of retries when trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, i.e.: host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. The value
+# 'infinite' or a negative value means no maximum limit; otherwise the value
+# must be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to the first DHCP agent which sends a get_active_networks message
+# to the neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to the first L3 agent which sends a sync_routers message to the neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on back-end, a health monitor is a resource
+# consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries in the event connectivity is lost;
+# set to -1 for an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
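Both bootstrap commands in neutron_init.sh ship commented out. For reference, the same two calls expressed against the python-neutronclient v2.0 API of this release; this is a sketch only, and every address, credential, and pool value below is illustrative rather than taken from the playbooks:

    # Sketch: create ext-net and ext-subnet via python-neutronclient.
    from neutronclient.v2_0 import client

    neutron = client.Client(username="admin", password="ADMIN_PASS",
                            tenant_name="admin",
                            auth_url="http://192.0.2.10:35357/v2.0")
    net = neutron.create_network({"network": {
        "name": "ext-net", "shared": True, "router:external": True}})
    neutron.create_subnet({"subnet": {
        "name": "ext-subnet",
        "network_id": net["network"]["id"],
        "ip_version": 4,
        "cidr": "203.0.113.0/24",        # EXTERNAL_NETWORK_CIDR
        "gateway_ip": "203.0.113.1",     # EXTERNAL_NETWORK_GATEWAY
        "enable_dhcp": False,
        "allocation_pools": [{"start": "203.0.113.101",   # FLOATING_IP_START
                              "end": "203.0.113.200"}],   # FLOATING_IP_END
    }})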
diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
new file mode 100644
index 0000000..9587073
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
new file mode 100644
index 0000000..d6c5cc8
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml
@@ -0,0 +1,21 @@
+---
+- name: restart neutron-plugin-openvswitch-agent
+ service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes
+ when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart neutron-l3-agent
+ service: name=neutron-l3-agent state=restarted enabled=yes
+
+- name: kill dnsmasq
+ command: killall dnsmasq
+ ignore_errors: True
+
+- name: restart neutron-dhcp-agent
+ service: name=neutron-dhcp-agent state=restarted enabled=yes
+
+- name: restart neutron-metadata-agent
+ service: name=neutron-metadata-agent state=restarted enabled=yes
+
+- name: restart xorp
+ service: name=xorp state=restarted enabled=yes sleep=10
+ ignore_errors: True
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
new file mode 100644
index 0000000..d6f38a0
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml
@@ -0,0 +1,20 @@
+---
+- name: Install XORP to provide IGMP router functionality
+ apt: pkg=xorp
+
+- name: configure xorp
+ template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
+ notify:
+ - restart xorp
+
+- name: set xorp defaults
+ lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
+ notify:
+ - restart xorp
+
+- meta: flush_handlers
+
+- name: start and enable xorp service
+ service: name=xorp state=started enabled=yes
+ retries: 2
+ delay: 10
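XORP is installed here so that multicast joins for the VXLAN group (vxlan_group = 239.1.1.1 in ml2_conf.ini) are routed between the hosts' internal interfaces. One hedged way to exercise that path from a node is to join the group yourself, which emits the IGMP membership report XORP acts on; the group and port below mirror the ml2 template:

    # Sketch: join the VXLAN multicast group to trigger an IGMP report.
    import socket
    import struct

    GROUP, PORT = "239.1.1.1", 4789   # vxlan_group / vxlan_udp_port

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", PORT))
    mreq = struct.pack("4s4s", socket.inet_aton(GROUP),
                       socket.inet_aton("0.0.0.0"))  # any local interface
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    print("joined", GROUP, "- observe with: tcpdump -i <iface> igmp")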
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
new file mode 100644
index 0000000..1d4b591
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml
@@ -0,0 +1,114 @@
+---
+- name: activate ipv4 forwarding
+ sysctl: name=net.ipv4.ip_forward value=1
+ state=present reload=yes
+
+- name: deactivate ipv4 rp filter
+ sysctl: name=net.ipv4.conf.all.rp_filter value=0
+ state=present reload=yes
+
+- name: deactivate ipv4 default rp filter
+ sysctl: name=net.ipv4.conf.default.rp_filter
+ value=0 state=present reload=yes
+
+- name: install neutron network related packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - neutron-plugin-ml2
+ - openvswitch-datapath-dkms
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+
+- name: generate neutron service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - openvswitch-switch
+ - neutron-l3-agent
+ - neutron-dhcp-agent
+ - neutron-plugin-openvswitch-agent
+ - neutron-metadata-agent
+ - xorp
+
+- name: install neutron openvswitch agent
+ apt: name=neutron-plugin-openvswitch-agent
+ state=present force=yes
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: config neutron
+ template: src=neutron-network.conf
+ dest=/etc/neutron/neutron.conf backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+ - restart neutron-l3-agent
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+ - restart neutron-metadata-agent
+
+- name: config l3 agent
+ template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini
+ backup=yes
+ notify:
+ - restart neutron-l3-agent
+
+- name: config dhcp agent
+ template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini
+ backup=yes
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: update dnsmasq-neutron.conf
+ template: src=dnsmasq-neutron.conf
+ dest=/etc/neutron/dnsmasq-neutron.conf
+ notify:
+ - kill dnsmasq
+ - restart neutron-dhcp-agent
+
+- name: config metadata agent
+ template: src=metadata_agent.ini
+ dest=/etc/neutron/metadata_agent.ini backup=yes
+ notify:
+ - restart neutron-metadata-agent
+
+- name: config ml2 plugin
+ template: src=ml2_conf.ini
+ dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+ backup=yes
+ notify:
+ - restart neutron-plugin-openvswitch-agent
+
+- meta: flush_handlers
+
+- name: add br-int
+ openvswitch_bridge: bridge=br-int state=present
+
+- name: add br-ex
+ openvswitch_bridge: bridge=br-ex state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: assign a port to br-ex for physical ext interface
+ openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
+ state=present
+ when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- include: igmp-router.yml
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert kernel support for vxlan
+ command: modinfo -F version vxlan
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+ command: ip link add type vxlan help
+ register: iproute_out
+ failed_when: iproute_out.rc == 255
+ when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: odl.yml
+ when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart ovs service
+ service: name=openvswitch-switch state=restarted enabled=yes
+
+- meta: flush_handlers
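The two assert tasks above shell out to modinfo and ip; the same checks as a standalone sketch (identical commands, and the same 255 exit-code convention as the playbook's failed_when):

    # Sketch: verify kernel and iproute2 vxlan support, as the playbook does.
    import subprocess

    # Fails if the vxlan kernel module is absent.
    subprocess.run(["modinfo", "-F", "version", "vxlan"], check=True)

    # Old iproute2 builds exit 255 on the vxlan help invocation.
    result = subprocess.run(["ip", "link", "add", "type", "vxlan", "help"])
    if result.returncode == 255:
        raise SystemExit("iproute2 lacks vxlan support")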
diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
new file mode 100644
index 0000000..a2b449c
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml
@@ -0,0 +1,13 @@
+---
+- name: ovs set manager
+ command: ovs-vsctl set-manager tcp:{{ controller }}:6640
+
+- name: get ovs uuid
+ shell: ovs-vsctl get Open_vSwitch . _uuid
+ register: ovs_uuid
+
+- name: set bridge_mappings
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+
+- name: set local ip
+ command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
new file mode 100644
index 0000000..19eb62e
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location to DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
new file mode 100644
index 0000000..7bcbd9d
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
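dhcp-option-force=26,1454 makes dnsmasq push DHCP option 26 (interface MTU) so guest frames still fit in a 1500-byte physical MTU after tunnel encapsulation. The headroom arithmetic, using the standard IPv4 header sizes; 1454 is simply a conservative figure sitting below both results:

    # Worked example: largest guest MTU that survives tunnel encapsulation.
    PHYS_MTU = 1500
    GRE_OVERHEAD = 20 + 4        # outer IPv4 + basic GRE header
    VXLAN_OVERHEAD = 20 + 8 + 8  # outer IPv4 + UDP + VXLAN header
    print("max guest MTU over GRE:  ", PHYS_MTU - GRE_OVERHEAD)    # 1476
    print("max guest MTU over VXLAN:", PHYS_MTU - VXLAN_OVERHEAD)  # 1464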
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
new file mode 100644
index 0000000..32caf96
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
new file mode 100644
index 0000000..b394c00
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one
+# router. This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to an
+# empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# Seconds between re-syncing routers' data, if needed
+periodic_interval = 40
+
+# Seconds to wait after starting the agent before beginning to sync
+# routers' data
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
new file mode 100644
index 0000000..6badf28
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
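metadata_proxy_shared_secret is the key used to sign the Instance-ID header the proxy adds, which is why the comment insists the value match nova's neutron_metadata_proxy_shared_secret. A sketch of the signing scheme used in this era (HMAC-SHA256 over the instance UUID); both values below are illustrative:

    # Sketch: sign an instance ID the way the metadata proxy does.
    import hashlib
    import hmac

    shared_secret = b"METADATA_SECRET"                     # illustrative
    instance_id = b"5c9a4b7e-8f2d-4e6a-9b1c-0d3f2a6e8c41"  # illustrative
    signature = hmac.new(shared_secret, instance_id,
                         hashlib.sha256).hexdigest()
    # Sent as X-Instance-ID-Signature; nova recomputes and compares it.
    print(signature)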
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls whether the Neutron security group is enabled or not.
+# It should be false when the Nova security group API is used instead.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
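
Because the driver lists in this template come from Jinja2 join filters, rendering it locally before a deploy catches variable typos early. A minimal sketch, assuming Ansible is available, the command is run from the role's templates directory, and the variable values shown are illustrative rather than the project's defaults:

    # Render ml2_conf.ini on the local machine and inspect the driver lines.
    ansible localhost -c local -m template \
      -a "src=ml2_conf.ini dest=/tmp/ml2_conf.ini.rendered" \
      -e '{"NEUTRON_TYPE_DRIVERS": ["flat","vlan","vxlan"],
           "NEUTRON_TENANT_NETWORK_TYPES": ["vxlan"],
           "NEUTRON_MECHANISM_DRIVERS": ["openvswitch"],
           "NEUTRON_TUNNEL_TYPES": ["vxlan"],
           "NEUTRON_DBPASS": "secret",
           "db_host": "127.0.0.1",
           "internal_ip": "10.1.0.11"}'
    grep -E '^(type_drivers|tenant_network_types|mechanism_drivers)' /tmp/ml2_conf.ini.rendered
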
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
new file mode 100644
index 0000000..93be9cb
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative integer means no limit; otherwise
+# the value must be greater than 0. If more items are requested than
+# pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. Non-hosted networks will be
+# scheduled to the first DHCP agent that sends a get_active_networks message
+# to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. Non-hosted routers will be
+# scheduled to the first L3 agent that sends sync_routers to the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries if connectivity is lost;
+# setting it to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be of the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN.
+# The combination of <service type> and <name> must be unique; <driver> must also be unique.
+# This is a multi-line option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's LBaaS driver, uncomment the line below.
+# To keep HAProxy as the default LBaaS driver, remove the ':default' attribute
+# from that line; otherwise comment out the HAProxy line above.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
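
The values above pair agent_down_time = 75 in [DEFAULT] with report_interval = 30 in [agent], which satisfies the "half or less" guidance from the comments. A scripted sanity check, as a sketch (it assumes crudini is installed and the rendered file lives at the usual path):

    # Fail loudly if agents could be marked down between reports.
    conf=/etc/neutron/neutron.conf
    down=$(crudini --get "$conf" DEFAULT agent_down_time)
    report=$(crudini --get "$conf" agent report_interval)
    if [ "$report" -le $((down / 2)) ]; then
      echo "ok: report_interval=$report, agent_down_time=$down"
    else
      echo "warning: report_interval too high, agents may flap as down"
    fi
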
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
new file mode 100644
index 0000000..1575367
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response.
+# A value of 'infinite' or a negative integer means no limit; otherwise
+# the value must be greater than 0. If more items are requested than
+# pagination_max_limit, the server will just return pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. Non-hosted networks will be
+# scheduled to the first DHCP agent that sends a get_active_networks message
+# to the neutron server.
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. Non-hosted routers will be
+# scheduled to the first L3 agent that sends sync_routers to the neutron server.
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer,
+# which is why a quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer, which is why a quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Number of database reconnection retries if connectivity is lost;
+# setting it to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be of the form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN.
+# The combination of <service type> and <name> must be unique; <driver> must also be unique.
+# This is a multi-line option; example for the default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# Example of a non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# To activate Radware's LBaaS driver, uncomment the line below.
+# To keep HAProxy as the default LBaaS driver, remove the ':default' attribute
+# from that line; otherwise comment out the HAProxy line above.
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# Uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
+
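
Both commands in this script ship commented out; when enabled they create the shared external network and its floating-IP subnet. Rendered with illustrative values (the addresses below are examples, not project defaults), the calls would look like:

    # Create the shared external network and its allocation pool.
    neutron --os-username=admin --os-password=ADMIN_PASS \
      --os-tenant-name=admin --os-auth-url=http://10.1.0.50:35357/v2.0 \
      net-create ext-net --shared --router:external=True
    neutron --os-username=admin --os-password=ADMIN_PASS \
      --os-tenant-name=admin --os-auth-url=http://10.1.0.50:35357/v2.0 \
      subnet-create ext-net --name ext-subnet \
      --allocation-pool start=192.168.50.100,end=192.168.50.200 \
      --disable-dhcp --gateway 192.168.50.1 192.168.50.0/24
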
diff --git a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
new file mode 100644
index 0000000..9587073
--- /dev/null
+++ b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
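
keystone_authtoken's auth_uri must point at a versioned Identity endpoint; a malformed path such as /2.0 instead of /v2.0 yields 404s, and every request then fails token validation. A quick probe, as a sketch (HA_VIP stands in for the deployment's virtual IP):

    # A valid v2.0 root answers 200 with a version document.
    curl -s -o /dev/null -w "%{http_code}\n" "http://${HA_VIP}:5000/v2.0/"
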
diff --git a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
new file mode 100644
index 0000000..c135003
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart nova-compute
+ service: name=nova-compute state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
new file mode 100644
index 0000000..51c8dfa
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+- name: install nova-compute related packages
+ apt: name=nova-compute-kvm state=present force=yes
+
+- name: update nova-compute conf
+ template: src={{ item }} dest=/etc/nova/{{ item }}
+ with_items:
+ - nova.conf
+ - nova-compute.conf
+ notify:
+ - restart nova-compute
+
+- name: generate nova control service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - nova-compute
+
+- meta: flush_handlers
+
+- name: remove nova sqlite db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
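
The rm ... || touch idiom above keeps reruns from failing once the SQLite file is gone, at the cost of a stray marker file in the task's working directory. An idempotent shell equivalent without the marker, as a sketch:

    # Remove Nova's bootstrap SQLite database if it is still present;
    # exit 0 either way so repeated runs stay green.
    if [ -f /var/lib/nova/nova.sqlite ]; then
      rm -f /var/lib/nova/nova.sqlite
    fi
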
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
new file mode 100644
index 0000000..401dee7
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+compute_driver=libvirt.LibvirtDriver
+force_raw_images = true
+[libvirt]
+virt_type=qemu
+images_type = raw
+mem_stats_period_seconds=0
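
virt_type=qemu forces plain QEMU emulation, which runs anywhere (including on virtual deploy hosts) but is much slower than KVM. A common pre-flight check before switching to virt_type=kvm, as a sketch:

    # A non-zero count means VT-x/AMD-V is exposed and kvm is viable.
    if [ "$(grep -Ec '(vmx|svm)' /proc/cpuinfo)" -gt 0 ]; then
      echo "hardware virt available: virt_type=kvm is an option"
    else
      echo "no vmx/svm flags: keep virt_type=qemu"
    fi
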
diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
new file mode 100644
index 0000000..4988cb0
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
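
Console access depends on novncproxy_base_url being reachable from wherever the dashboard is browsed, while the proxy itself binds to the node's internal IP on port 6080. A reachability probe, as a sketch (HA_VIP again stands in for the virtual IP):

    # Expect HTTP 200 once nova-novncproxy is up behind the VIP.
    curl -s -o /dev/null -w "%{http_code}\n" "http://${HA_VIP}:6080/vnc_auto.html"
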
diff --git a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
new file mode 100644
index 0000000..b4c1585
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+ service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+ service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+ service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+ service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+ service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+ service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+ shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+ service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
new file mode 100644
index 0000000..72a9f4d
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: nova_install.yml
+ tags:
+ - install
+ - nova_install
+ - nova
+
+- include: nova_config.yml
+ when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+ tags:
+ - config
+ - nova_config
+ - nova
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
new file mode 100644
index 0000000..62351fa
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml
@@ -0,0 +1,16 @@
+---
+- name: nova db sync
+ command: su -s /bin/sh -c "nova-manage db sync" nova
+ register: result
+ until: result.rc == 0
+ retries: 5
+ delay: 3
+ notify:
+ - restart nova-api
+ - restart nova-cert
+ - restart nova-consoleauth
+ - restart nova-scheduler
+ - restart nova-conductor
+ - restart nova-novncproxy
+
+- meta: flush_handlers
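
The retries/until pattern tolerates the database VIP coming up after the controller role starts. The shell equivalent of what the task encodes (five attempts, three seconds apart), as a sketch:

    # Retry "nova-manage db sync" as the nova user until it succeeds.
    for i in 1 2 3 4 5; do
      su -s /bin/sh -c "nova-manage db sync" nova && break
      sleep 3
    done
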
diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
new file mode 100644
index 0000000..a1cded5
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml
@@ -0,0 +1,35 @@
+---
+- name: install nova related packages
+ apt: name={{ item }} state=present force=yes
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+ - python-novaclient
+ - python-oslo.rootwrap
+
+- name: generate nova control service list
+ shell: echo {{ item }} >> /opt/service
+ with_items:
+ - nova-api
+ - nova-cert
+ - nova-conductor
+ - nova-consoleauth
+ - nova-novncproxy
+ - nova-scheduler
+
+- name: update nova conf
+ template: src=nova.conf
+ dest=/etc/nova/nova.conf
+ backup=yes
+ notify:
+ - restart nova-api
+ - restart nova-cert
+ - restart nova-consoleauth
+ - restart nova-scheduler
+ - restart nova-conductor
+ - restart nova-novncproxy
+ - remove nova-sqlite-db
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
new file mode 100644
index 0000000..19eb62e
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or RPC errors. The interval is the number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC,
+# NVP, BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
+# no additional setup of the DHCP server.
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only
+# be activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+enable_isolated_metadata = False
+
+# Allows for serving metadata requests coming from a dedicated metadata
+# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
+# is connected to a Neutron router from which the VMs send metadata
+# request. In this case DHCP Option 121 will not be injected in VMs, as
+# they will be able to reach 169.254.169.254 through a router.
+# This option requires enable_isolated_metadata = True
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server.
+# num_sync_threads = 4
+
+# Location to store DHCP server config files
+# dhcp_confs = $state_path/dhcp
+
+# Domain to use for building the hostnames
+dhcp_domain = openstacklocal
+
+# Override the default dnsmasq settings with this file
+# dnsmasq_config_file =
+dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
+
+# Comma-separated list of DNS servers which will be used by dnsmasq
+# as forwarders.
+# dnsmasq_dns_servers =
+
+# Limit number of leases to prevent a denial-of-service.
+dnsmasq_lease_max = 16777216
+
+# Location of the DHCP lease relay UNIX domain socket
+# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# dhcp_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the dhcp agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a dhcp server is disabled.
+# dhcp_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
new file mode 100644
index 0000000..7bcbd9d
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
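
dhcp-option-force=26,1454 makes dnsmasq push DHCP option 26 (interface MTU) of 1454 bytes to guests, keeping instance traffic below the physical 1500-byte MTU once tunnel headers are added (VXLAN costs roughly 50 bytes, GRE roughly 42). Verifying from inside a booted instance, as a sketch:

    # The guest NIC should have picked up the DHCP-supplied MTU.
    ip link show eth0 | grep -o 'mtu [0-9]*'    # expect: mtu 1454
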
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
new file mode 100644
index 0000000..32caf96
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+ restore-original-config-on-shutdown: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ description: "Internal pNodes interface"
+ disable: false
+ default-system-config
+ }
+}
+
+protocols {
+ igmp {
+ disable: false
+ interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+ disable: false
+ version: 3
+ }
+ }
+ traceoptions {
+ flag all {
+ disable: false
+ }
+ }
+ }
+}
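
This XORP configuration enables IGMPv3 on the internal interface so hosts can join the VXLAN multicast group (239.1.1.1 in ml2_conf.ini) across the pNodes. Group membership can be inspected on a host, as a sketch (eth1 stands in for the internal interface):

    # 239.1.1.1 should be listed once VXLAN tunnels are active.
    ip maddr show dev eth1
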
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
new file mode 100644
index 0000000..b394c00
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IPs (requires a kernel built with CONFIG_NET_NS=y and
+# an iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False, then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow the L3 agent to support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
+
+# Name of bridge used for external network traffic. This should be set to
+# an empty value for the Linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
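
With use_namespaces = True above, each router hosted by the agent lives in its
own qrouter-<uuid> network namespace. A minimal check, assuming the agent is
already running (the UUID is illustrative):

    # Namespaces created by the L3 agent
    ip netns list | grep qrouter
    # Routing table inside one router namespace (substitute a real UUID)
    sudo ip netns exec qrouter-<router-uuid> ip route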
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
new file mode 100644
index 0000000..6badf28
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
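
The shared secret above works as an HMAC key: the proxy adds an
X-Instance-ID-Signature header computed as HMAC-SHA256 over the instance UUID,
and Nova recomputes it with its neutron_metadata_proxy_shared_secret. A sketch
of the same computation (both values are placeholders):

    INSTANCE_ID="<instance-uuid>"   # placeholder
    SECRET="<metadata secret>"      # must match METADATA_SECRET on both sides
    echo -n "$INSTANCE_ID" | openssl dgst -sha256 -hmac "$SECRET"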
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls whether the neutron security group is enabled.
+# It should be false when you use the nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
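
With flat_networks = external and the VXLAN VNI range above, a first exercise
of this file is one flat external network plus one VXLAN tenant network; a
sketch with the Juno-era CLI, assuming admin credentials are sourced and the
network names are illustrative:

    neutron net-create ext-net --shared --router:external=True \
        --provider:network_type flat --provider:physical_network external
    neutron net-create demo-net --provider:network_type vxlan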
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
new file mode 100644
index 0000000..93be9cb
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative value means no max limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update it's cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# setting it to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
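
As the agent-management comments above require, report_interval (30) stays
below half of agent_down_time (75); if agents still flap, a quick liveness
check once the server is up (admin credentials assumed) is:

    neutron agent-list   # the 'alive' column should show :-) for each agent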
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
new file mode 100644
index 0000000..1575367
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = /
+# Maximum retries with trying to connect to RabbitMQ
+# (the default of 0 implies an infinite retry count)
+# rabbit_max_retries = 0
+# RabbitMQ connection retry interval
+# rabbit_retry_interval = 1
+# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
+# wipe RabbitMQ database when changing this option. (boolean value)
+# rabbit_ha_queues = false
+# QPID
+# rpc_backend=neutron.openstack.common.rpc.impl_qpid
+# Qpid broker hostname
+# qpid_hostname = localhost
+# Qpid broker port
+# qpid_port = 5672
+# Qpid single or HA cluster (host:port pairs, e.g. host1:5672, host2:5672)
+# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
+# qpid_hosts = localhost:5672
+# Username for qpid connection
+# qpid_username = ''
+# Password for qpid connection
+# qpid_password = ''
+# Space separated list of SASL mechanisms to use for auth
+# qpid_sasl_mechanisms = ''
+# Seconds between connection keepalive heartbeats
+# qpid_heartbeat = 60
+# Transport to use, either 'tcp' or 'ssl'
+# qpid_protocol = tcp
+# Disable Nagle algorithm
+# qpid_tcp_nodelay = True
+
+# ZMQ
+# rpc_backend=neutron.openstack.common.rpc.impl_zmq
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address.
+# rpc_zmq_bind_address = *
+
+# ============ Notification System Options =====================
+
+# Notifications can be sent when network/subnet/port are created, updated or deleted.
+# There are three methods of sending notifications: logging (via the
+# log_file directive), rpc (via a message queue) and
+# noop (no notifications sent, the default)
+
+# Notification_driver can be defined multiple times
+# Do nothing driver
+# notification_driver = neutron.openstack.common.notifier.no_op_notifier
+# Logging driver
+# notification_driver = neutron.openstack.common.notifier.log_notifier
+# RPC driver.
+notification_driver = neutron.openstack.common.notifier.rpc_notifier
+
+# default_notification_level is used to form actual topic name(s) or to set logging level
+default_notification_level = INFO
+
+# default_publisher_id is a part of the notification payload
+# host = myhost.com
+# default_publisher_id = $host
+
+# Defined in rpc_notifier, can be comma separated values.
+# The actual topic names will be %s.%(default_notification_level)s
+notification_topics = notifications
+
+# Default maximum number of items returned in a single response. A value of
+# 'infinite' or a negative value means no max limit; otherwise the value must
+# be greater than 0. If the number of items requested is greater than
+# pagination_max_limit, the server will return at most pagination_max_limit
+# items.
+# pagination_max_limit = -1
+
+# Maximum number of DNS nameservers per subnet
+# max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet
+# max_subnet_host_routes = 20
+
+# Maximum number of fixed ips per port
+# max_fixed_ips_per_port = 5
+
+# =========== items for agent management extension =============
+# Seconds to regard the agent as down; should be at least twice
+# report_interval, to be sure the agent is down for good
+agent_down_time = 75
+# =========== end of items for agent management extension =====
+
+# =========== items for agent scheduler extension =============
+# Driver to use for scheduling network to DHCP agent
+network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling router to a default L3 agent
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+# Driver to use for scheduling a loadbalancer pool to an lbaas agent
+# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on OpenStack. However, on the back end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on OpenStack. However, on the back end, a health monitor is a
+# resource consumer and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in the event connectivity is lost;
+# setting it to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
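
The nova_admin_tenant_id above is filled from NOVA_ADMIN_TENANT_ID.stdout_lines[0],
i.e. an Ansible-registered command result whose defining task is not part of
this patch. One plausible way to produce the value on a Juno controller:

    # UUID of the 'service' tenant (assuming that is the nova admin tenant)
    keystone tenant-list | awk '/ service / {print $2}'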
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
new file mode 100644
index 0000000..c8991a3
--- /dev/null
+++ b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
@@ -0,0 +1,72 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+osapi_compute_listen={{ internal_ip }}
+metadata_listen={{ internal_ip }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
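
A short smoke test once this template has converged on a controller; a sketch
assuming admin credentials are sourced and at least one instance exists:

    nova service-list                            # State should be 'up'
    nova get-vnc-console <instance-uuid> novnc   # URL should use HA_VIP:6080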
diff --git a/compass/deploy/ansible/roles/repo/tasks/main.yml b/compass/deploy/ansible/roles/repo/tasks/main.yml
new file mode 100644
index 0000000..9476f80
--- /dev/null
+++ b/compass/deploy/ansible/roles/repo/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: add juno cloudarchive
+ apt_repository: repo="{{ juno_cloud_archive }}" state=present
+
+- name: first update pkgs
+ apt: update_cache=yes
diff --git a/compass/deploy/ansible/roles/repo/templates/sources.list b/compass/deploy/ansible/roles/repo/templates/sources.list
new file mode 100644
index 0000000..8b062e7
--- /dev/null
+++ b/compass/deploy/ansible/roles/repo/templates/sources.list
@@ -0,0 +1 @@
+{{ LOCAL_REPO }}
diff --git a/compass/deploy/compass_vm.sh b/compass/deploy/compass_vm.sh
new file mode 100644
index 0000000..0764917
--- /dev/null
+++ b/compass/deploy/compass_vm.sh
@@ -0,0 +1,103 @@
+compass_vm_dir=$WORK_DIR/vm/compass
+rsa_file=$compass_vm_dir/boot.rsa
+ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
+function tear_down_compass() {
+ sudo virsh destroy compass > /dev/null 2>&1
+ sudo virsh undefine compass > /dev/null 2>&1
+
+ sudo umount $compass_vm_dir/old > /dev/null 2>&1
+ sudo umount $compass_vm_dir/new > /dev/null 2>&1
+
+ sudo rm -rf $compass_vm_dir
+
+ log_info "tear_down_compass success!!!"
+}
+
+function install_compass_core() {
+ local inventory_file=$compass_vm_dir/inventory.file
+ log_info "install_compass_core enter"
+ sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" $WORK_DIR/installer/compass-install/install/group_vars/all
+ echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > $inventory_file
+ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml
+ exit_status=$?
+ rm $inventory_file
+ log_info "install_compass_core exit"
+ if [[ $exit_status != 0 ]];then
+ /bin/false
+ fi
+}
+
+function wait_ok() {
+ log_info "wait_compass_ok enter"
+ retry=0
+ until timeout 1s ssh $ssh_args root@$MGMT_IP "exit" 2>/dev/null
+ do
+ log_progress "os install time used: $((retry*100/$1))%"
+ sleep 1
+ let retry+=1
+ if [[ $retry -ge $1 ]];then
+ log_error "os install time out"
+ tear_down_compass
+ exit 1
+ fi
+ done
+
+ log_warn "os install time used: 100%"
+ log_info "wait_compass_ok exit"
+}
+
+function launch_compass() {
+ local old_mnt=$compass_vm_dir/old
+ local new_mnt=$compass_vm_dir/new
+ local old_iso=$WORK_DIR/iso/centos.iso
+ local new_iso=$compass_vm_dir/centos.iso
+
+ log_info "launch_compass enter"
+ tear_down_compass
+
+ set -e
+ mkdir -p $compass_vm_dir $old_mnt
+ sudo mount -o loop $old_iso $old_mnt
+ cd $old_mnt;find .|cpio -pd $new_mnt;cd -
+
+ sudo umount $old_mnt
+
+ chmod 755 -R $new_mnt
+ sed -i -e "s/REPLACE_MGMT_IP/$MGMT_IP/g" -e "s/REPLACE_MGMT_NETMASK/$MGMT_MASK/g" -e "s/REPLACE_INSTALL_IP/$COMPASS_SERVER/g" -e "s/REPLACE_INSTALL_NETMASK/$INSTALL_MASK/g" -e "s/REPLACE_GW/$MGMT_GW/g" $new_mnt/isolinux/isolinux.cfg
+
+ sudo ssh-keygen -f $new_mnt/bootstrap/boot.rsa -t rsa -N ''
+ cp $new_mnt/bootstrap/boot.rsa $rsa_file
+
+ rm -rf $new_mnt/.rr_moved $new_mnt/rr_moved
+ sudo mkisofs -quiet -r -J -R -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -hide-rr-moved -x "lost+found:" -o $new_iso $new_mnt
+
+ rm -rf $old_mnt $new_mnt
+
+ qemu-img create -f qcow2 $compass_vm_dir/disk.img 100G
+
+ # create vm xml
+ sed -e "s/REPLACE_MEM/$COMPASS_VIRT_MEM/g" \
+ -e "s/REPLACE_CPU/$COMPASS_VIRT_CPUS/g" \
+ -e "s#REPLACE_IMAGE#$compass_vm_dir/disk.img#g" \
+ -e "s#REPLACE_ISO#$compass_vm_dir/centos.iso#g" \
+ -e "s/REPLACE_NET_MGMT/mgmt/g" \
+ -e "s/REPLACE_BRIDGE_INSTALL/br_install/g" \
+ $COMPASS_DIR/deploy/template/vm/compass.xml \
+ > $WORK_DIR/vm/compass/libvirt.xml
+
+ sudo virsh define $compass_vm_dir/libvirt.xml
+ sudo virsh start compass
+
+ if ! wait_ok 300;then
+ log_error "install os timeout"
+ exit 1
+ fi
+
+ if ! install_compass_core;then
+ log_error "install compass core failed"
+ exit 1
+ fi
+
+ set +e
+ log_info "launch_compass exit"
+}
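
While launch_compass sits in wait_ok, the same readiness probe can be run by
hand; a sketch using the key and paths the script itself sets up:

    sudo virsh list --all | grep compass
    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
        -i $WORK_DIR/vm/compass/boot.rsa root@$MGMT_IP exit && echo reachable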
diff --git a/compass/deploy/conf/baremetal.conf b/compass/deploy/conf/baremetal.conf
new file mode 100644
index 0000000..317d561
--- /dev/null
+++ b/compass/deploy/conf/baremetal.conf
@@ -0,0 +1,20 @@
+export VIRT_CPUS=4
+export HOST_MACS="'64:3e:8c:4c:6d:a3' '64:3e:8c:4c:6d:37' '64:3e:8c:4c:6c:d7' '64:3e:8c:4c:6b:7b' '64:3e:8c:4c:68:2b'"
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="172.29.1.166"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
+export VIP="10.1.0.222"
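
POLL_SWITCHES_FLAG is nopoll_switches here, so discovery relies on the static
HOST_MACS list rather than SNMP polling; if polling is re-enabled, the switch
credential above can be sanity-checked first (snmpwalk from net-snmp assumed
installed):

    snmpwalk -v 2c -c public 172.29.1.166 sysDescr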
diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf
index 8362b9a..d8e8d51 100644
--- a/compass/deploy/conf/base.conf
+++ b/compass/deploy/conf/base.conf
@@ -1,11 +1,28 @@
-export COMPASS_SERVER_URL="http://10.1.0.12/api"
+export ISO_URL=http://192.168.123.11:9999/xh/work/build/work/compass.iso
+export INSTALL_IP=${INSTALL_IP:-10.1.0.12}
+export INSTALL_MASK=${INSTALL_MASK:-255.255.255.0}
+export INSTALL_GW=${INSTALL_GW:-10.1.0.1}
+export INSTALL_IP_START=${INSTALL_IP_START:-10.1.0.1}
+export INSTALL_IP_END=${INSTALL_IP_END:-10.1.0.254}
+export MGMT_IP=${MGMT_IP:-192.168.200.2}
+export MGMT_MASK=${MGMT_MASK:-255.255.252.0}
+export MGMT_GW=${MGMT_GW:-192.168.200.1}
+export MGMT_IP_START=${MGMT_IP_START:-192.168.200.3}
+export MGMT_IP_END=${MGMT_IP_END:-192.168.200.254}
+export OM_NIC=${OM_NIC:-eth3}
+export OM_IP=${OM_IP:-192.168.123.11/22}
+export OM_GW=${OM_GW:-192.168.120.1}
+export COMPASS_VIRT_CPUS=4
+export COMPASS_VIRT_MEM=4096
+export COMPASS_SERVER=$INSTALL_IP
+export COMPASS_SERVER_URL="http://$COMPASS_SERVER/api"
export COMPASS_USER_EMAIL="admin@huawei.com"
export COMPASS_USER_PASSWORD="admin"
export CLUSTER_NAME="opnfv2"
export LANGUAGE="EN"
export TIMEZONE="America/Los_Angeles"
-export NTP_SERVER="10.1.0.12"
-export NAMESERVERS="10.1.0.12"
+export NTP_SERVER="$COMPASS_SERVER"
+export NAMESERVERS="$COMPASS_SERVER"
export DOMAIN="ods.com"
export PARTITIONS="/home=5%,/tmp=5%,/var=20%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
@@ -18,6 +35,7 @@ export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
+
function next_ip {
ip_addr=$1
ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')"
diff --git a/compass/deploy/conf/cluster.conf b/compass/deploy/conf/cluster.conf
new file mode 100644
index 0000000..4f43027
--- /dev/null
+++ b/compass/deploy/conf/cluster.conf
@@ -0,0 +1,20 @@
+export VIRT_NUMBER=5
+export VIRT_CPUS=4
+export VIRT_MEM=16384
+export VIRT_DISK=30G
+export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
+#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
+export ADAPTER_NAME="openstack_juno"
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
+export HOSTNAMES="host1,host2,host3,host4,host5"
+export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
+export DEFAULT_ROLES=""
+export SWITCH_IPS="1.1.1.1"
+export SWITCH_CREDENTIAL="version=2c,community=public"
+export DEPLOYMENT_TIMEOUT="150"
+export POLL_SWITCHES_FLAG="nopoll_switches"
+export DASHBOARD_URL=""
+export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source ${REGTEST_DIR}/base.conf
+export VIP="10.1.0.222"
diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf
index e63e514..f32411b 100644
--- a/compass/deploy/conf/five.conf
+++ b/compass/deploy/conf/five.conf
@@ -1,6 +1,6 @@
export VIRT_NUMBER=5
export VIRT_CPUS=4
-export VIRT_MEM=4096
+export VIRT_MEM=16384
export VIRT_DISK=30G
export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
@@ -12,7 +12,7 @@ export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,
export DEFAULT_ROLES=""
export SWITCH_IPS="1.1.1.1"
export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="90"
+export DEPLOYMENT_TIMEOUT="150"
export POLL_SWITCHES_FLAG="nopoll_switches"
export DASHBOARD_URL=""
export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh
index 18857cd..41ef209 100644
--- a/compass/deploy/deploy-vm.sh
+++ b/compass/deploy/deploy-vm.sh
@@ -16,6 +16,12 @@ fi
cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
#source ../compass-install/ci/allinone.conf
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+ "ssh root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks" vagrant
+
+/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
+ "scp -r ${SCRIPT_DIR}/../deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py" \
+ vagrant
bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
@@ -32,7 +38,8 @@ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_
--network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
--machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
---deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}"
+--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
+--cluster_vip="${VIP}"
deploy_result=$?
tear_down_machines
cd ../compass-install
diff --git a/compass/deploy/deploy_host.sh b/compass/deploy/deploy_host.sh
new file mode 100644
index 0000000..d08a821
--- /dev/null
+++ b/compass/deploy/deploy_host.sh
@@ -0,0 +1,40 @@
+function deploy_host(){
+ cd $WORK_DIR/installer/compass-core
+ source $WORK_DIR/venv/bin/activate
+ if pip --help | grep -q trusted; then
+ pip install -i http://pypi.douban.com/simple -e . --trusted-host pypi.douban.com
+ else
+ pip install -i http://pypi.douban.com/simple -e .
+ fi
+
+ sudo mkdir -p /var/log/compass
+ sudo chmod -R 777 /var/log/compass
+
+ sudo mkdir -p /etc/compass
+ sudo cp -rf conf/setting /etc/compass/.
+
+ cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
+ sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
+ ssh $ssh_args root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks
+ scp $ssh_args -r ${COMPASS_DIR}/deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py
+
+ (sleep 15; reboot_hosts) &
+ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
+ --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
+ --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
+ --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
+ --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \
+ --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \
+ --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \
+ --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \
+ --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \
+ --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \
+ --server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \
+ --os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \
+ --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
+ --network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
+ --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
+ --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
+ --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
+ --cluster_vip="${VIP}"
+}
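
Note: deploy_host() launches "(sleep 15; reboot_hosts) &" so the PXE nodes are
power-cycled shortly after bin/client.py begins registering the cluster,
letting them network-boot into the Compass-provisioned images. The pattern in
isolation (reboot_hosts is stubbed here; the real one comes from the sourced
host_vm.sh or its baremetal equivalent):

    #!/bin/bash
    # Delayed background action while a long-running foreground job starts.
    reboot_hosts() { echo "power-cycling PXE hosts..."; }
    (sleep 15; reboot_hosts) &    # fire after a grace period
    echo "starting deployment client..."
    wait                          # block until the background job finishes
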
diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh
index 29c2c23..49ea947 100644..100755
--- a/compass/deploy/func.sh
+++ b/compass/deploy/func.sh
@@ -1,19 +1,22 @@
function tear_down_machines() {
- virtmachines=$(virsh list --name |grep pxe)
+ virtmachines=$(sudo virsh list --name |grep pxe)
for virtmachine in $virtmachines; do
echo "destroy $virtmachine"
- virsh destroy $virtmachine
+ sudo virsh destroy $virtmachine
if [[ "$?" != "0" ]]; then
echo "destroy instance $virtmachine failed"
exit 1
fi
done
- virtmachines=$(virsh list --all --name |grep pxe)
- for virtmachine in $virtmachines; do
- echo "undefine $virtmachine"
- virsh undefine $virtmachine
+
+ sudo virsh list --all|grep shut|awk '{print $2}'|xargs -n 1 sudo virsh undefine
+
+ vol_names=$(sudo virsh vol-list default |grep .img | awk '{print $1}')
+ for vol_name in $vol_names; do
+ echo "virsh vol-delete $vol_name"
+ sudo virsh vol-delete $vol_name --pool default
if [[ "$?" != "0" ]]; then
- echo "undefine instance $virtmachine failed"
+ echo "vol-delete $vol_name failed!"
exit 1
fi
done
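
Note: the reworked tear_down_machines() destroys running pxe domains, undefines
whatever is shut off, and deletes leftover .img volumes from the 'default'
storage pool. A quick post-teardown check, assuming the same sudo virsh access:

    #!/bin/bash
    # Verify nothing pxe-related survived the teardown.
    sudo virsh list --all --name | grep pxe || echo "no pxe domains left"
    sudo virsh vol-list default | grep '\.img' || echo "no leftover volumes"
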
diff --git a/compass/deploy/host_baremetal.sh b/compass/deploy/host_baremetal.sh
new file mode 100644
index 0000000..26238e0
--- /dev/null
+++ b/compass/deploy/host_baremetal.sh
@@ -0,0 +1,9 @@
+function get_host_macs() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+ local machines=`echo $HOST_MACS|sed 's/ /,/g'`
+
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ echo $machines
+}
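
Note: for baremetal, get_host_macs() simply reshapes the space-separated
HOST_MACS environment variable into the comma-separated pxe_boot_macs list that
compass-install expects. What it appends for a two-node run (MACs are
examples):

    #!/bin/bash
    HOST_MACS="00:11:22:33:44:01 00:11:22:33:44:02"
    machines=$(echo $HOST_MACS | sed 's/ /,/g')
    echo "pxe_boot_macs: [${machines}]"
    # -> pxe_boot_macs: [00:11:22:33:44:01,00:11:22:33:44:02]
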
diff --git a/compass/deploy/host_vm.sh b/compass/deploy/host_vm.sh
new file mode 100644
index 0000000..cf9a757
--- /dev/null
+++ b/compass/deploy/host_vm.sh
@@ -0,0 +1,59 @@
+host_vm_dir=$WORK_DIR/vm
+function tear_down_machines() {
+ for i in host{0..4}
+ do
+ sudo virsh destroy $i 1>/dev/null 2>/dev/null
+ sudo virsh undefine $i 1>/dev/null 2>/dev/null
+ rm -rf $host_vm_dir/$i
+ done
+}
+
+function reboot_hosts() {
+ log_warn "reboot_hosts does nothing"
+}
+
+function launch_host_vms() {
+ tear_down_machines
+ #function_body
+ mac_array=`echo $machines|sed 's/,/ /g'`
+ log_info "bringing up pxe boot vms"
+ i=0
+ for mac in $mac_array; do
+ log_info "creating vm disk for instance host${i}"
+ vm_dir=$host_vm_dir/host$i
+ mkdir -p $vm_dir
+ sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
+ # create vm xml
+ sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
+ -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
+ -e "s/REPLACE_NAME/host$i/g" \
+ -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
+ -e "s/REPLACE_BOOT_MAC/$mac/g" \
+ -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
+ -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
+ -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
+ $COMPASS_DIR/deploy/template/vm/host.xml\
+ > $vm_dir/libvirt.xml
+
+ sudo virsh define $vm_dir/libvirt.xml
+ sudo virsh start host$i
+ let i=i+1
+ done
+}
+
+function get_host_macs() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+ local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
+ local machines=
+
+ chmod +x $mac_generator
+ mac_array=`$mac_generator $VIRT_NUMBER`
+ machines=`echo $mac_array|sed 's/ /,/g'`
+
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ echo $machines
+}
+
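
Note: launch_host_vms() instantiates template/vm/host.xml once per MAC by
sed-substituting the REPLACE_* tokens, then defines and starts each domain. The
substitution step run standalone against a trivial template (values are
illustrative, not repo defaults):

    #!/bin/bash
    printf '<domain><name>REPLACE_NAME</name><memory>REPLACE_MEM</memory></domain>\n' \
        > /tmp/host.xml.tmpl
    sed -e "s/REPLACE_MEM/16384/g" \
        -e "s/REPLACE_NAME/host0/g" \
        /tmp/host.xml.tmpl > /tmp/host0.xml
    cat /tmp/host0.xml
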
diff --git a/compass/deploy/network.sh b/compass/deploy/network.sh
new file mode 100755
index 0000000..c60607e
--- /dev/null
+++ b/compass/deploy/network.sh
@@ -0,0 +1,70 @@
+function destroy_nets() {
+ sudo virsh net-destroy mgmt > /dev/null 2>&1
+ sudo virsh net-undefine mgmt > /dev/null 2>&1
+
+ sudo virsh net-destroy install > /dev/null 2>&1
+ sudo virsh net-undefine install > /dev/null 2>&1
+ rm -rf $COMPASS_DIR/deploy/work/network/*.xml
+}
+
+function setup_om_bridge() {
+ local device=$1
+ local gw=$2
+ ip link set br_install down
+ ip addr flush $device
+ brctl delbr br_install
+
+ brctl addbr br_install
+ brctl addif br_install $device
+ ip link set br_install up
+
+ shift;shift
+ for ip in $*;do
+ ip addr add $ip dev br_install
+ done
+
+ route add default gw $gw
+}
+
+function setup_om_nat() {
+ # create install network
+ sed -e "s/REPLACE_BRIDGE/br_install/g" \
+ -e "s/REPLACE_NAME/install/g" \
+ -e "s/REPLACE_GATEWAY/$INSTALL_GW/g" \
+ -e "s/REPLACE_MASK/$INSTALL_MASK/g" \
+ -e "s/REPLACE_START/$INSTALL_IP_START/g" \
+ -e "s/REPLACE_END/$INSTALL_IP_END/g" \
+ $COMPASS_DIR/deploy/template/network/nat.xml \
+ > $WORK_DIR/network/install.xml
+
+ sudo virsh net-define $WORK_DIR/network/install.xml
+ sudo virsh net-start install
+}
+
+function create_nets() {
+ destroy_nets
+
+ # create mgmt network
+ sed -e "s/REPLACE_BRIDGE/br_mgmt/g" \
+ -e "s/REPLACE_NAME/mgmt/g" \
+ -e "s/REPLACE_GATEWAY/$MGMT_GW/g" \
+ -e "s/REPLACE_MASK/$MGMT_MASK/g" \
+ -e "s/REPLACE_START/$MGMT_IP_START/g" \
+ -e "s/REPLACE_END/$MGMT_IP_END/g" \
+ $COMPASS_DIR/deploy/template/network/nat.xml \
+ > $WORK_DIR/network/mgmt.xml
+
+ sudo virsh net-define $WORK_DIR/network/mgmt.xml
+ sudo virsh net-start mgmt
+
+ # create install network
+ if [[ ! -z $VIRT_NUMBER ]];then
+ setup_om_nat
+ else
+ mask=`echo $INSTALL_MASK | awk -F'.' '{print ($1*(2^24)+$2*(2^16)+$3*(2^8)+$4)}'`
+ mask_len=`echo "obase=2;${mask}"|bc|awk -F'0' '{print length($1)}'`
+ setup_om_bridge $OM_NIC $OM_GW $INSTALL_GW/$mask_len $OM_IP
+ fi
+
+}
+
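
Note: create_nets() chooses NAT when VIRT_NUMBER is set (virtual deploy) and a
host bridge otherwise; the bridge path converts the dotted INSTALL_MASK to a
prefix length via the bc pipeline above. An equivalent pure-bash sketch of that
conversion, which may read more clearly (IPv4 only, assumes a valid contiguous
mask):

    #!/bin/bash
    mask_to_prefix() {
        local IFS=. octet bits=0
        for octet in $1; do              # split the mask on dots via IFS
            while [ "$octet" -gt 0 ]; do
                bits=$((bits + (octet & 1)))
                octet=$((octet >> 1))    # count set bits per octet
            done
        done
        echo "$bits"
    }
    mask_to_prefix 255.255.255.0    # prints 24
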
diff --git a/compass/deploy/prepare.sh b/compass/deploy/prepare.sh
index 2086c5d..9ec15f8 100644
--- a/compass/deploy/prepare.sh
+++ b/compass/deploy/prepare.sh
@@ -1,35 +1,35 @@
-sudo apt-get update -y
-sudo apt-get install git python-pip python-dev -y
-vagrant --version
-if [[ $? != 0 ]]; then
- vagrant_pkg_url=https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb
- wget ${vagrant_pkg_url}
- sudo dpkg -i $(basename ${vagrant_pkg_url})
-else
- echo "vagrant is already installed"
-fi
-sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
-sudo service libvirt-bin restart
+function prepare_env() {
+ export PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages
+ sudo apt-get update -y
+ sudo apt-get install -y mkisofs bc
+ sudo apt-get install git python-pip python-dev -y
+ sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y
+ sudo pip install --upgrade pip
+ sudo pip install --upgrade ansible
+ sudo pip install --upgrade virtualenv
+ sudo service libvirt-bin restart
+
+ # prepare work dir
+ sudo rm -rf $WORK_DIR
+ mkdir -p $WORK_DIR
+ mkdir -p $WORK_DIR/installer
+ mkdir -p $WORK_DIR/vm
+ mkdir -p $WORK_DIR/network
+ mkdir -p $WORK_DIR/iso
+ mkdir -p $WORK_DIR/venv
-for plugin in vagrant-libvirt vagrant-mutate; do
- vagrant plugin list |grep $plugin
- if [[ $? != 0 ]]; then
- vagrant plugin install $plugin --plugin-source https://ruby.taobao.org
- else
- echo "$plugin plugin is already installed"
+ if [[ ! -f $WORK_DIR/iso/centos.iso ]]; then
+ wget -O $WORK_DIR/iso/centos.iso $ISO_URL
fi
-done
-sudo pip install --upgrade ansible virtualenv
-#precise_box_vb_url=https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box
-#precise_box_vb_filename=$(basename ${precise_box_vb_url})
-centos65_box_vb_url=https://developer.nrel.gov/downloads/vagrant-boxes/CentOS-6.5-x86_64-v20140504.box
-centos65_box_vb_filename=$(basename ${centos65_box_vb_url})
-#wget ${precise_box_vb_url}
-vagrant box list |grep centos65
-if [[ $? != 0 ]]; then
- wget ${centos65_box_vb_url}
- mv ${centos65_box_vb_filename} centos65.box
- vagrant mutate centos65.box libvirt
-else
- echo "centos65 box already exists"
-fi
+
+ # copy compass
+ mkdir -p $WORK_DIR/mnt
+ sudo mount -o loop $WORK_DIR/iso/centos.iso $WORK_DIR/mnt
+ cp -rf $WORK_DIR/mnt/compass/compass-core $WORK_DIR/installer/
+ cp -rf $WORK_DIR/mnt/compass/compass-install $WORK_DIR/installer/
+ sudo umount $WORK_DIR/mnt
+ rm -rf $WORK_DIR/mnt
+
+ chmod 755 $WORK_DIR -R
+ virtualenv $WORK_DIR/venv
+}
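
Note: prepare_env() now sources compass-core and compass-install from a mounted
release ISO instead of Vagrant boxes. A defensive variant of the copy step,
assuming the same $WORK_DIR layout; it checks that the ISO actually carries the
expected compass/ tree and always unmounts:

    #!/bin/bash
    mount_and_copy() {
        local iso=$WORK_DIR/iso/centos.iso mnt=$WORK_DIR/mnt
        mkdir -p "$mnt"
        sudo mount -o loop "$iso" "$mnt" || return 1
        if [ -d "$mnt/compass/compass-core" ]; then
            cp -rf "$mnt"/compass/compass-{core,install} "$WORK_DIR/installer/"
        else
            echo "unexpected ISO layout: no compass/ tree" >&2
        fi
        sudo umount "$mnt" && rm -rf "$mnt"
    }
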
diff --git a/compass/deploy/remote_excute.exp b/compass/deploy/remote_excute.exp
new file mode 100644
index 0000000..9dd112b
--- /dev/null
+++ b/compass/deploy/remote_excute.exp
@@ -0,0 +1,23 @@
+#!/usr/bin/expect
+
+set command [lindex $argv 0]
+set passwd [lindex $argv 1]
+
+eval spawn "$command"
+set timeout 60
+
+expect {
+ -re ".*es.*o.*"
+ {
+ exp_send "yes\r"
+ exp_continue
+ }
+
+ -re ".*sword:" {
+ exp_send "$passwd\r"
+
+ }
+
+}
+
+interact
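
Note: remote_excute.exp wraps any ssh/scp command, auto-answering the host-key
confirmation with "yes" and typing the supplied password at the password
prompt. Invocation mirrors the deploy-vm.sh calls above (the IP is an example):

    #!/bin/bash
    /usr/bin/expect compass/deploy/remote_excute.exp \
        "ssh root@10.1.0.12 hostname" vagrant
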
diff --git a/compass/deploy/status_callback.py b/compass/deploy/status_callback.py
new file mode 100644
index 0000000..8619132
--- /dev/null
+++ b/compass/deploy/status_callback.py
@@ -0,0 +1,174 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import httplib
+import json
+import sys
+import logging
+
+def task_error(host, data):
+ logging.info("task_error: host=%s,data=%s" % (host, data))
+
+ if type(data) == dict:
+ invocation = data.pop('invocation', {})
+
+ notify_host("localhost", host, "failed")
+
+class CallbackModule(object):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ task_error(host, res)
+
+ def runner_on_ok(self, host, res):
+ pass
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def runner_on_async_failed(self, host, res, jid):
+ task_error(host, res)
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ logging.info("playbook_on_stats enter")
+ hosts = sorted(stats.processed.keys())
+ host_vars = self.playbook.inventory.get_variables(hosts[0])
+ cluster_name = host_vars['cluster_name']
+ failures = False
+ unreachable = False
+
+ for host in hosts:
+ summary = stats.summarize(host)
+
+ if summary['failures'] > 0:
+ failures = True
+ if summary['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ for host in hosts:
+ notify_host("localhost", host, "error")
+ return
+
+ for host in hosts:
+ clusterhost_name = host + "." + cluster_name
+ notify_host("localhost", clusterhost_name, "succ")
+
+
+def raise_for_status(resp):
+ if resp.status < 200 or resp.status >= 300:
+ raise RuntimeError("%s, %s, %s" % (resp.status, resp.reason, resp.read()))
+
+def auth(conn):
+ credential = {}
+ credential['email'] = "admin@huawei.com"
+ credential['password'] = "admin"
+ url = "/api/users/token"
+ headers = {"Content-type": "application/json",
+ "Accept": "*/*"}
+ conn.request("POST", url, json.dumps(credential), headers)
+ resp = conn.getresponse()
+
+ raise_for_status(resp)
+ return json.loads(resp.read())["token"]
+
+def notify_host(compass_host, host, status):
+ if status == "succ":
+ body = {"ready": True}
+ url = "/api/clusterhosts/%s/state_internal" % host
+ elif status == "error":
+ body = {"state": "ERROR"}
+ host = host.strip("host")
+ url = "/api/clusterhosts/%s/state" % host
+ else:
+ logging.error("notify_host: host %s with status %s is not supported" \
+ % (host, status))
+ return
+
+ headers = {"Content-type": "application/json",
+ "Accept": "*/*"}
+
+ conn = httplib.HTTPConnection(compass_host, 80)
+ token = auth(conn)
+ headers["X-Auth-Token"] = token
+ logging.info("host=%s,url=%s,body=%s,headers=%s" % (compass_host,url,json.dumps(body),headers))
+ conn.request("POST", url, json.dumps(body), headers)
+ resp = conn.getresponse()
+ try:
+ raise_for_status(resp)
+ logging.info("notify host status success!!! status=%s, body=%s" % (resp.status, resp.read()))
+ except Exception as e:
+ logging.error("http request failed %s" % str(e))
+ raise
+ finally:
+ conn.close()
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ logging.error("params: host, status is need")
+ sys.exit(1)
+
+ host = sys.argv[1]
+ status = sys.argv[2]
+ notify_host("localhost", host, status)
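
Note: the callback authenticates against the Compass REST API for a token, then
POSTs per-host state once the playbook finishes. A curl sketch of the success
branch of notify_host(), assuming the same endpoints and the demo credentials
hard-coded above (CLUSTERHOST is a hypothetical name):

    #!/bin/bash
    COMPASS=localhost
    CLUSTERHOST=host1.opnfv2    # "<host>.<cluster_name>"
    TOKEN=$(curl -s -X POST "http://$COMPASS/api/users/token" \
        -H 'Content-Type: application/json' \
        -d '{"email":"admin@huawei.com","password":"admin"}' \
        | python -c 'import json,sys; print(json.load(sys.stdin)["token"])')
    curl -s -X POST "http://$COMPASS/api/clusterhosts/$CLUSTERHOST/state_internal" \
        -H 'Content-Type: application/json' -H "X-Auth-Token: $TOKEN" \
        -d '{"ready": true}'
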
diff --git a/compass/deploy/template/network/bridge.xml b/compass/deploy/template/network/bridge.xml
new file mode 100644
index 0000000..6202cf1
--- /dev/null
+++ b/compass/deploy/template/network/bridge.xml
@@ -0,0 +1,5 @@
+<network ipv6='no'>
+ <name>REPLACE_NAME</name>
+ <forward mode='bridge'>
+ </forward>
+</network>
diff --git a/compass/deploy/template/network/nat.xml b/compass/deploy/template/network/nat.xml
new file mode 100644
index 0000000..90ce888
--- /dev/null
+++ b/compass/deploy/template/network/nat.xml
@@ -0,0 +1,10 @@
+<network ipv6='yes'>
+ <name>REPLACE_NAME</name>
+ <forward mode='nat'/>
+ <bridge name='REPLACE_BRIDGE'/>
+ <ip address='REPLACE_GATEWAY' netmask='REPLACE_MASK'>
+ <!--dhcp>
+ <range start='REPLACE_START' end='REPLACE_END'/>
+ </dhcp-->
+ </ip>
+</network>
diff --git a/compass/deploy/template/vm/compass.xml b/compass/deploy/template/vm/compass.xml
new file mode 100644
index 0000000..918a9f2
--- /dev/null
+++ b/compass/deploy/template/vm/compass.xml
@@ -0,0 +1,64 @@
+<domain type='kvm'>
+ <name>compass</name>
+ <memory unit='MiB'>REPLACE_MEM</memory>
+ <currentMemory unit='MiB'>REPLACE_MEM</currentMemory>
+ <vcpu placement='static'>REPLACE_CPU</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='cdrom'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cpu mode='host-model'>
+ <model fallback='allow'/>
+ <feature policy='optional' name='vmx'/>
+ <feature policy='optional' name='svm'/>
+ </cpu>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='REPLACE_IMAGE'/>
+ <target dev='vda' bus='ide'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='REPLACE_ISO'/>
+ <target dev='hdc' bus='ide'/>
+ <readonly/>
+ </disk>
+ <controller type='pci' index='0' model='pci-root'/>
+ <interface type='network'>
+ <source network='REPLACE_NET_MGMT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_INSTALL'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0' keymap='en-us'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ </video>
+ </devices>
+</domain>
diff --git a/compass/deploy/template/vm/host.xml b/compass/deploy/template/vm/host.xml
new file mode 100644
index 0000000..b399e6f
--- /dev/null
+++ b/compass/deploy/template/vm/host.xml
@@ -0,0 +1,67 @@
+<domain type='kvm'>
+ <name>REPLACE_NAME</name>
+ <memory unit='MiB'>REPLACE_MEM</memory>
+ <currentMemory unit='MiB'>REPLACE_MEM</currentMemory>
+ <vcpu placement='static'>REPLACE_CPU</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-trusty'>hvm</type>
+ <boot dev='hd'/>
+ <boot dev='network'/>
+ <bios useserial='yes' rebootTimeout='0'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='REPLACE_IMAGE'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <interface type='bridge'>
+ <mac address=REPLACE_BOOT_MAC/>
+ <source bridge='REPLACE_BRIDGE_MGMT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_TENANT'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_PUBLIC'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </interface>
+ <interface type='bridge'>
+ <source bridge='REPLACE_BRIDGE_STORAGE'/>
+ <model type='virtio'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+ </interface>
+ <serial type='pty'>
+ <source path='/dev/pts/0'/>
+ </serial>
+ <console type='pty' tty='/dev/pts/0'>
+ <source path='/dev/pts/0'/>
+ <target type='serial' port='0'/>
+ </console>
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='vnc' port='5901' autoport='yes' listen='0.0.0.0'>
+ <listen type='address' address='0.0.0.0'/>
+ </graphics>
+ <video>
+ <model type='cirrus' vram='9216' heads='1'/>
+ <alias name='video0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+ </video>
+ </devices>
+</domain>
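
Note: the domain tries the local disk first and falls back to network boot, so
freshly created hosts PXE from Compass until an OS lands on vda. Before 'virsh
define', a rendered copy can be sanity-checked against libvirt's domain schema
(the path follows the launch_host_vms() layout):

    #!/bin/bash
    virt-xml-validate "$WORK_DIR/vm/host0/libvirt.xml" domain
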
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
index 100e12d..5550976 100644
--- a/foreman/ci/Vagrantfile
+++ b/foreman/ci/Vagrantfile
@@ -12,7 +12,7 @@ Vagrant.configure(2) do |config|
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
- config.vm.box = "chef/centos-7.0"
+ config.vm.box = "opnfv/centos-7.0"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
@@ -41,6 +41,9 @@ Vagrant.configure(2) do |config|
default_gw = ""
nat_flag = false
+ # Disable dhcp flag
+ disable_dhcp_flag = false
+
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
@@ -90,4 +93,8 @@ Vagrant.configure(2) do |config|
config.vm.provision :shell, path: "nat_setup.sh"
end
config.vm.provision :shell, path: "bootstrap.sh"
+ if disable_dhcp_flag
+ config.vm.provision :shell, :inline => "systemctl stop dhcpd"
+ config.vm.provision :shell, :inline => "systemctl disable dhcpd"
+ end
end
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
index 4bc22ed..c98f00e 100755
--- a/foreman/ci/bootstrap.sh
+++ b/foreman/ci/bootstrap.sh
@@ -25,8 +25,7 @@ green=`tput setaf 2`
yum install -y epel-release-7*
# Install other required packages
-# Major version is pinned to force some consistency for Arno
-if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then
printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
exit 1
fi
@@ -36,7 +35,7 @@ cd /opt
echo "Cloning khaleesi to /opt"
if [ ! -d khaleesi ]; then
- if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+ if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
exit 1
fi
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
index f61ac93..345864b 100755
--- a/foreman/ci/clean.sh
+++ b/foreman/ci/clean.sh
@@ -3,22 +3,23 @@
#Clean script to uninstall provisioning server for Foreman/QuickStack
#author: Tim Rozet (trozet@redhat.com)
#
-#Uses Vagrant and VirtualBox
+#Removes Libvirt, KVM, Vagrant, VirtualBox
#
-#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Destroys Vagrant VMs running in $vm_dir/
#Shuts down all nodes found in Khaleesi settings
-#Removes hypervisor kernel modules (VirtualBox)
+#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt)
##VARS
reset=`tput sgr0`
blue=`tput setaf 4`
red=`tput setaf 1`
green=`tput setaf 2`
+vm_dir=/var/opt/opnfv
##END VARS
##FUNCTIONS
display_usage() {
- echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+ echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo -e "\nUsage:\n$0 [arguments] \n"
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. Example: -base_config /opt/myinventory.yml \n"
@@ -31,7 +32,7 @@ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
exit 0
fi
-echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo "Use -h to display help"
sleep 2
@@ -50,54 +51,55 @@ do
esac
done
-
-# Install ipmitool
-# Major version is pinned to force some consistency for Arno
-if ! yum list installed | grep -i ipmitool; then
- if ! yum -y install ipmitool-1*; then
- echo "${red}Unable to install ipmitool!${reset}"
- exit 1
- fi
-else
- echo "${blue}Skipping ipmitool as it is already installed!${reset}"
-fi
-
-###find all the bmc IPs and number of nodes
-node_counter=0
-output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'`
-for line in ${output} ; do
- bmc_ip[$node_counter]=$line
- ((node_counter++))
-done
-
-max_nodes=$((node_counter-1))
-
-###find bmc_users per node
-node_counter=0
-output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
-for line in ${output} ; do
- bmc_user[$node_counter]=$line
- ((node_counter++))
-done
-
-###find bmc_pass per node
-node_counter=0
-output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
-for line in ${output} ; do
- bmc_pass[$node_counter]=$line
- ((node_counter++))
-done
-
-for mynode in `seq 0 $max_nodes`; do
- echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
- if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
- echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Unable to install ipmitool!${reset}"
+ exit 1
+ fi
else
- echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
- exit 1
+ echo "${blue}Skipping ipmitool as it is already installed!${reset}"
fi
-done
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+ echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+ else
+ echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+ exit 1
+ fi
+ done
+else
+ echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}"
+fi
###check to see if vbox is installed
vboxpkg=`rpm -qa | grep VirtualBox`
if [ $? -eq 0 ]; then
@@ -106,39 +108,120 @@ else
skip_vagrant=1
fi
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+ cd /tmp/bgs_vagrant
+ vagrant destroy -f
+ rm -rf /tmp/bgs_vagrant
+fi
+
###destroy vagrant
if [ $skip_vagrant -eq 0 ]; then
- cd /tmp/bgs_vagrant
- if vagrant destroy -f; then
- echo "${blue}Successfully destroyed Foreman VM ${reset}"
+ if [ -d $vm_dir ]; then
+ ##all vm directories
+ for vm in $( ls $vm_dir ); do
+ cd $vm_dir/$vm
+ if vagrant destroy -f; then
+ echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+ else
+ echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+ killall vagrant
+ echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+ if ps axf | grep vagrant | grep -v 'grep'; then
+ echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
+ fi
+ fi
+
+ ##Vagrant boxes appear as VboxHeadless processes
+ ##try to gracefully destroy the VBox VM if it still exists
+ if vboxmanage list runningvms | grep $vm; then
+ echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+ vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+ vboxmanage controlvm $vbox_id poweroff
+ if vboxmanage unregistervm --delete $vbox_id; then
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ else
+ echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ fi
+ done
else
- echo "${red}Unable to destroy Foreman VM ${reset}"
- echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
- if ps axf | grep vagrant; then
- echo "${red}Vagrant VM still exists...exiting ${reset}"
- exit 1
- else
- echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
- fi
+ echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
fi
+ echo "${blue}Checking for any remaining virtual box processes...${reset}"
###kill virtualbox
- echo "${blue}Killing VirtualBox ${reset}"
- killall virtualbox
- killall VBoxHeadless
+ if ps axf | grep virtualbox | grep -v 'grep'; then
+ echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+ killall virtualbox
+ fi
+
+ ###kill any leftover VMs (brute force)
+ if ps axf | grep VBoxHeadless | grep -v 'grep'; then
+ echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+ killall VBoxHeadless
+ fi
###remove virtualbox
- echo "${blue}Removing VirtualBox ${reset}"
+ echo "${blue}Removing VirtualBox... ${reset}"
yum -y remove $vboxpkg
else
- echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+ echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
fi
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
+
+###check to see if libvirt is installed
+echo "${blue}Checking if libvirt/KVM is installed"
+if rpm -qa | grep -iE 'libvirt|kvm'; then
+ echo "${blue}Libvirt/KVM is installed${reset}"
+ echo "${blue}Checking for any QEMU/KVM VMs...${reset}"
+ vm_count=0
+ while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1)
+ if [ $vm_count -gt 0 ]; then
+ echo "${blue}VMs Found: $vm_count${reset}"
+ vm_running=0
+ while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running)
+ echo "${blue}Powering off $vm_running VM(s)${reset}"
+ while read -r vm; do
+ if ! virsh destroy $vm; then
+ echo "${red}WARNING: Unable to power off VM ${vm}${reset}"
+ else
+ echo "${blue}VM $vm powered off!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}')
+ echo "${blue}Destroying libvirt VMs...${reset}"
+ while read -r vm; do
+ if ! virsh undefine --remove-all-storage $vm; then
+ echo "${red}ERROR: Unable to remove the VM ${vm}${reset}"
+ exit 1
+ else
+ echo "${blue}VM $vm removed!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}')
+ else
+ echo "${blue}No VMs found for removal"
+ fi
+ echo "${blue}Removing libvirt and kvm packages"
+ yum -y remove libvirt-*
+ yum -y remove *qemu*
+else
+ echo "${blue}libvirt/KVM is not installed${reset}"
+fi
###remove kernel modules
echo "${blue}Removing kernel modules ${reset}"
-for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do
if ! rmmod $kernel_mod; then
if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
echo "${blue} $kernel_mod is not currently loaded! ${reset}"
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 86f03a7..6771da0 100755
--- a/foreman/ci/deploy.sh
+++ b/foreman/ci/deploy.sh
@@ -25,6 +25,12 @@ red=`tput setaf 1`
green=`tput setaf 2`
declare -A interface_arr
+declare -A controllers_ip_arr
+declare -A admin_ip_arr
+declare -A public_ip_arr
+
+vm_dir=/var/opt/opnfv
+script=`realpath $0`
##END VARS
##FUNCTIONS
@@ -35,6 +41,36 @@ display_usage() {
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n"
echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n"
+ echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n"
+ echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n"
+ echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n"
+ echo -e "\n -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n"
+ echo -e "\n -admin_nic : Baremetal NIC for the admin network. Required if other "nic" arguments are used. \
+Not applicable with -virtual. Example: -admin_nic em1"
+ echo -e "\n -private_nic : Baremetal NIC for the private network. Required if other "nic" arguments are used. \
+Not applicable with -virtual. Example: -private_nic em2"
+ echo -e "\n -public_nic : Baremetal NIC for the public network. Required if other "nic" arguments are used. \
+Can also be used with -virtual. Example: -public_nic em3"
+ echo -e "\n -storage_nic : Baremetal NIC for the storage network. Optional. Not applicable with -virtual. \
+Private NIC will be used for storage if not specified. Example: -storage_nic em4"
+}
+
+##verify vm dir exists
+##params: none
+function verify_vm_dir {
+ if [ -d "$vm_dir" ]; then
+ echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n"
+ exit 1
+ else
+ mkdir -p $vm_dir
+ fi
+
+ chmod 700 $vm_dir
+
+ if [ ! -d $vm_dir ]; then
+ echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n"
+ exit 1
+ fi
}
##find ip of interface
@@ -51,6 +87,41 @@ function find_subnet {
printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
}
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ num_ips_required=$2
+
+ ##this function assumes you would never need more than 254
+ ##we check here to make sure
+ if [ "$num_ips_required" -ge 254 ]; then
+ echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
+ return 1
+ fi
+
+ ##we just return if 3rd octet is not 255
+ ##because we know the subnet is big enough
+ if [ "$i3" -ne 255 ]; then
+ return 0
+ elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+ return 0
+ else
+ echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+ return 1
+ fi
+}
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ IFS=. read -r m1 m2 m3 m4 <<< "$2"
+ IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+ printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
+
##increments subnet by a value
##params: ip, value
##assumes low value
@@ -87,6 +158,19 @@ function next_ip {
echo $baseaddr.$lsv
}
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octet of the address
+subtract_ip() {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ ip_count=$2
+ if [ $i4 -lt $ip_count ]; then
+ echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n"
+ exit 1
+ fi
+ printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
+
##removes the network interface config from Vagrantfile
##params: interface
##assumes you are in the directory of Vagrantfile
@@ -149,19 +233,21 @@ parse_yaml() {
}'
}
-##END FUNCTIONS
-
-if [[ ( $1 == "--help") || $1 == "-h" ]]; then
+##translates the command line parameters into variables
+##params: $@ the entire command line is passed
+##usage: parse_cmdline "$@"
+parse_cmdline() {
+ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
display_usage
exit 0
-fi
+ fi
-echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
-echo "Use -h to display help"
-sleep 2
+ echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+ echo "Use -h to display help"
+ sleep 2
-while [ "`echo $1 | cut -c1`" = "-" ]
-do
+ while [ "`echo $1 | cut -c1`" = "-" ]
+ do
echo $1
case "$1" in
-base_config)
@@ -176,35 +262,137 @@ do
virtual="TRUE"
shift 1
;;
+ -enable_virtual_dhcp)
+ enable_virtual_dhcp="TRUE"
+ shift 1
+ ;;
+ -static_ip_range)
+ static_ip_range=$2
+ shift 2
+ ;;
+ -ping_site)
+ ping_site=$2
+ shift 2
+ ;;
+ -floating_ip_count)
+ floating_ip_count=$2
+ shift 2
+ ;;
+ -admin_nic)
+ admin_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -private_nic)
+ private_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -public_nic)
+ public_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -storage_nic)
+ storage_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
*)
display_usage
exit 1
;;
-esac
-done
+ esac
+ done
+
+ if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP!. Exiting${reset}\n\n"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ if [ ! -z "$enable_virtual_dhcp" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual!. Exiting${reset}\n\n"
+ exit 1
+ elif [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. static_ip_range can only be set when using -virtual!. Exiting${reset}\n\n"
+ exit 1
+ fi
+ fi
+
+ if [ -z "$floating_ip_count" ]; then
+ floating_ip_count=20
+ fi
+
+ ##Validate nic args
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ if [ -z "$virtual" ]; then
+ for nic_type in admin_nic private_nic public_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ -z "$nic_value" ]; then
+ echo "${red}$nic_type is empty or not defined. Required when other nic args are given!${reset}"
+ exit 1
+ fi
+ interface_ip=$(find_ip $nic_value)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}$nic_value does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ done
+ else
+ ##if virtual only public_nic should be specified
+ for nic_type in admin_nic private_nic storage_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ ! -z "$nic_value" ]; then
+ echo "${red}$nic_type is not a valid argument using -virtual. Please only specify public_nic!${reset}"
+ exit 1
+ fi
+ done
+
+ interface_ip=$(find_ip $public_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Public NIC: $public_nic does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ fi
+ fi
+}
##disable selinux
-/sbin/setenforce 0
-
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
-
-# Install other required packages
-# Major versions are pinned to force some consistency for Arno
-if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
- printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
- exit 1
-fi
-
-##install VirtualBox repo
-if cat /etc/*release | grep -i "Fedora release"; then
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
-else
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
-fi
-
-cat > /etc/yum.repos.d/virtualbox.repo << EOM
+##params: none
+##usage: disable_selinux()
+disable_selinux() {
+ /sbin/setenforce 0
+}
+
+##Install the EPEL repository and additional packages
+##params: none
+##usage: install_EPEL()
+install_EPEL() {
+ # Install EPEL repo for access to many other yum repos
+ # Major version is pinned to force some consistency for Arno
+ yum install -y epel-release-7*
+
+ # Install other required packages
+ # Major versions are pinned to force some consistency for Arno
+ if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
+ printf '%s\n' 'deploy.sh: Unable to install dependency packages' >&2
+ exit 1
+ fi
+}
+
+##Download and install virtual box
+##params: none
+##usage: install_vbox()
+install_vbox() {
+ ##install VirtualBox repo
+ if cat /etc/*release | grep -i "Fedora release"; then
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
+ else
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
+ fi
+
+ cat > /etc/yum.repos.d/virtualbox.repo << EOM
[virtualbox]
name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox
baseurl=$vboxurl
@@ -215,365 +403,321 @@ skip_if_unavailable = 1
keepcache = 0
EOM
-##install VirtualBox
-if ! yum list installed | grep -i virtualbox; then
- if ! yum -y install VirtualBox-4.3; then
- printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
- exit 1
+ ##install VirtualBox
+ if ! yum list installed | grep -i virtualbox; then
+ if ! yum -y install VirtualBox-4.3; then
+ printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
+ exit 1
+ fi
fi
-fi
-##install kmod-VirtualBox
-if ! lsmod | grep vboxdrv; then
- if ! sudo /etc/init.d/vboxdrv setup; then
- printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
- exit 1
+ ##install kmod-VirtualBox
+ if ! lsmod | grep vboxdrv; then
+ sudo /etc/init.d/vboxdrv setup
+ if ! lsmod | grep vboxdrv; then
+ printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
-fi
+}
-##install Ansible
-if ! yum list installed | grep -i ansible; then
- if ! yum -y install ansible-1*; then
- printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
- exit 1
+##install Ansible using yum
+##params: none
+##usage: install_ansible()
+install_ansible() {
+ if ! yum list installed | grep -i ansible; then
+ if ! yum -y install ansible-1*; then
+ printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+ exit 1
+ fi
fi
-fi
+}
-##install Vagrant
-if ! rpm -qa | grep vagrant; then
- if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
- printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
- exit 1
+##install Vagrant RPM directly with the bintray.com site
+##params: none
+##usage: install_vagrant()
+install_vagrant() {
+ if ! rpm -qa | grep vagrant; then
+ if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
+ printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
-fi
-##add centos 7 box to vagrant
-if ! vagrant box list | grep chef/centos-7.0; then
- if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
- printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
- exit 1
+ ##add centos 7 box to vagrant
+ if ! vagrant box list | grep opnfv/centos-7.0; then
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
-fi
-##install workaround for centos7
-if ! vagrant plugin list | grep vagrant-centos7_fix; then
- if ! vagrant plugin install vagrant-centos7_fix; then
- printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ ##install workaround for centos7
+ if ! vagrant plugin list | grep vagrant-centos7_fix; then
+ if ! vagrant plugin install vagrant-centos7_fix; then
+ printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
-fi
+}
-cd /tmp/
##remove bgs vagrant incase it wasn't cleaned up
-rm -rf /tmp/bgs_vagrant
-
-##clone bgs vagrant
-##will change this to be opnfv repo when commit is done
-if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
-fi
-
-cd bgs_vagrant
-
-echo "${blue}Detecting network configuration...${reset}"
-##detect host 1 or 3 interface configuration
-#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
-output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
-if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
-fi
-
-##find number of interfaces with ip and substitute in VagrantFile
-if_counter=0
-for interface in ${output}; do
-
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- new_ip=$(next_usable_ip $interface_ip)
- if [ ! "$new_ip" ]; then
- continue
- fi
- interface_arr[$interface]=$if_counter
- interface_ip_arr[$if_counter]=$new_ip
- subnet_mask=$(find_netmask $interface)
- if [ "$if_counter" -eq 1 ]; then
- private_subnet_mask=$subnet_mask
- private_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 2 ]; then
- public_subnet_mask=$subnet_mask
- public_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 3 ]; then
- storage_subnet_mask=$subnet_mask
- fi
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
- ((if_counter++))
-done
-
-##now remove interface config in Vagrantfile for 1 node
-##if 1, 3, or 4 interfaces set deployment type
-##if 2 interfaces remove 2nd interface and set deployment type
-if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
-elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
-elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
-else
- deployment_type="multi_network"
-fi
-
-echo "${blue}Network detected: ${deployment_type}! ${reset}"
-
-if route | grep default; then
- echo "${blue}Default Gateway Detected ${reset}"
- host_default_gw=$(ip route | grep default | awk '{print $3}')
- echo "${blue}Default Gateway: $host_default_gw ${reset}"
- default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
- case "${interface_arr[$default_gw_interface]}" in
- 0)
- echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- node_default_gw=$host_default_gw
- ;;
- 1)
- echo "${red}Default Gateway Detected on Private Interface!${reset}"
- echo "${red}Private subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- 2)
- echo "${blue}Default Gateway Detected on Public Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
- sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
- echo "${blue}Setting node gateway to be VM Admin IP${reset}"
- node_default_gw=${interface_ip_arr[0]}
- public_gateway=$default_gw
- ;;
- 3)
- echo "${red}Default Gateway Detected on Storage Interface!${reset}"
- echo "${red}Storage subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- *)
- echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
- exit 1
- ;;
- esac
-else
- #assumes 24 bit mask
- defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
- firstip=.1
- defaultgw=$defaultgw$firstip
- echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
- node_default_gw=$defaultgw
-fi
-
-if [ $base_config ]; then
- if ! cp -f $base_config opnfv_ksgen_settings.yml; then
- echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
- exit 1
- fi
-fi
-
-if [ $no_parse ]; then
-echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
-
-else
-
-echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
-##Edit the ksgen settings appropriately
-##ksgen settings will be stored in /vagrant on the vagrant machine
-##if single node deployment all the variables will have the same ip
-##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
-
-sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+##params: none
+##usage: clean_tmp()
+clean_tmp() {
+ rm -rf $vm_dir/foreman_vm
+}
-##replace private interface parameter
-##private interface will be of hosts, so we need to know the provisioned host interface name
-##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
-##replace IP for parameters with next IP that will be given to controller
-if [ "$deployment_type" == "single_network" ]; then
- ##we also need to assign IP addresses to nodes
- ##for single node, foreman is managing the single network, so we can't reserve them
- ##not supporting single network anymore for now
- echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}"
- exit 0
+##clone genesis and move to node vm dir
+##params: destination directory
+##usage: clone_bgs /tmp/myvm/
+clone_bgs() {
+ script_dir="`dirname "$script"`"
+ cp -fr $script_dir/ $1
+ cp -fr $script_dir/../../common/puppet-opnfv $1
+}
-elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+##validates the network settings and update VagrantFile with network settings
+##params: none
+##usage: configure_network()
+configure_network() {
+ cd $vm_dir/foreman_vm
+
+ ##if nic_arg_flag is set, then we don't figure out
+ ##NICs dynamically
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}"
+ if [ $virtual ]; then
+ nic_list="$public_nic"
+ elif [ -z "$storage_nic" ]; then
+ echo "${blue}storage_nic not defined, will combine storage into private VLAN ${reset}"
+ nic_list="$admin_nic $private_nic $public_nic"
+ else
+ nic_list="$admin_nic $private_nic $public_nic $storage_nic"
+ fi
+ nic_array=( $nic_list )
+ output=$nic_list
+ else
+ echo "${blue}Detecting network configuration...${reset}"
+ ##detect host 1 or 3 interface configuration
+ #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
+ output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f10`
+ fi
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ if [ ! "$output" ]; then
+ printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+ exit 1
fi
- sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+ ##virtual we only find 1 interface
+ if [ $virtual ]; then
+ if [ ! -z "${nic_array[0]}" ]; then
+ echo "${blue}Public Interface specified: ${nic_array[0]}${reset}"
+ this_default_gw_interface=${nic_array[0]}
+ else
+ ##find interface with default gateway
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
+ fi
- ##get ip addresses for private network on controllers to make dhcp entries
- ##required for controllers_ip_array global param
- next_private_ip=${interface_ip_arr[1]}
- type=_private
- for node in controller1 controller2 controller3; do
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
- exit 1
+ ##find interface IP, make sure its valid
+ interface_ip=$(find_ip $this_default_gw_interface)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}"
+ exit 1
fi
- sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
- controller_ip_array=$controller_ip_array$next_private_ip,
- done
- ##replace global param for contollers_ip_array
- controller_ip_array=${controller_ip_array%?}
- sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+ ##set variable info
+ if [ ! -z "$static_ip_range" ]; then
+ new_ip=$(echo $static_ip_range | cut -d , -f1)
+ else
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}"
+ exit 1
+ fi
+ fi
+ interface=$this_default_gw_interface
+ public_interface=$interface
+ interface_arr[$interface]=2
+ interface_ip_arr[2]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
- ##now replace all the VIP variables. admin//private can be the same IP
- ##we have to use IP's here that won't be allocated to hosts at provisioning time
- ##therefore we increment the ip by 10 to make sure we have a safe buffer
- next_private_ip=$(increment_ip $next_private_ip 10)
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: $interface_ip_arr[2] ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
- grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+ ##set that interface to be public
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ if_counter=1
+ else
+ ##find number of interfaces with ip and substitute in VagrantFile
+ if_counter=0
+ for interface in ${output}; do
+
+ if [ "$if_counter" -ge 4 ]; then
+ break
+ fi
+ interface_ip=$(find_ip $interface)
+ if [ ! "$interface_ip" ]; then
+ continue
+ fi
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ continue
+ fi
+ interface_arr[$interface]=$if_counter
+ interface_ip_arr[$if_counter]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ if [ "$if_counter" -eq 0 ]; then
+ admin_subnet_mask=$subnet_mask
+ if ! verify_subnet_size $admin_subnet_mask 5; then
+ echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ elif [ "$if_counter" -eq 1 ]; then
+ private_subnet_mask=$subnet_mask
+ private_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $private_subnet_mask 15; then
+ echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 2 ]; then
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 3 ]; then
+ storage_subnet_mask=$subnet_mask
+
+ if ! verify_subnet_size $storage_subnet_mask 10; then
+ echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ ((if_counter++))
+ done
+ fi
+ ##now set the deployment type based on the detected interface count
+ ##1 or 2 interfaces: virtual deployments are converted to three_network with internal networks; baremetal is unsupported
+ ##3 interfaces: three_network; 4 or more: multi_network
+ if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+ if [ $virtual ]; then
+ deployment_type="single_network"
+ echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}"
+ private_internal_ip=155.1.2.2
+ admin_internal_ip=156.1.2.2
+ private_subnet_mask=255.255.255.0
+ private_short_subnet_mask=/24
+ interface_ip_arr[1]=$private_internal_ip
+ interface_ip_arr[0]=$admin_internal_ip
+ admin_subnet_mask=255.255.255.0
+ admin_short_subnet_mask=/24
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ remove_vagrant_network eth_replace3
+ deployment_type=three_network
+ else
+ echo "${blue}Single network or 2 network detected for baremetal deployment. This is unsupported! Exiting. ${reset}"
exit 1
fi
- done
+ elif [ "$if_counter" == 3 ]; then
+ deployment_type="three_network"
+ remove_vagrant_network eth_replace3
+ else
+ deployment_type="multi_network"
+ fi
- ##replace foreman site
- next_public_ip=${interface_ip_arr[2]}
- sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
- ##replace public vips
- next_public_ip=$(increment_ip $next_public_ip 10)
- grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
- next_public_ip=$(next_usable_ip $next_public_ip)
- if [ ! "$next_public_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
- exit 1
+ echo "${blue}Network detected: ${deployment_type}! ${reset}"
+
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile
+ if [ $static_ip_range ]; then
+ ##verify static range is at least 20 IPs
+ static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1)
+ static_ip_range_end=$(echo $static_ip_range | cut -d , -f2)
+ ##verify range is at least 20 ips
+ ##assumes less than 255 range pool
+ begin_octet=$(echo $static_ip_range_begin | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_count=$((end_octet-begin_octet+1))
+ if [ "$ip_count" -lt 20 ]; then
+ echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Static IP range is size $ip_count ${reset}"
+ fi
+ fi
fi
- done
+ fi
- ##replace public_network param
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_network param
- private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
- sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
- ##replace storage_network
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ if route | grep default; then
+ echo "${blue}Default Gateway Detected ${reset}"
+ host_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $host_default_gw ${reset}"
+ default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
+ case "${interface_arr[$default_gw_interface]}" in
+ 0)
+ echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ node_default_gw=$host_default_gw
+ ;;
+ 1)
+ echo "${red}Default Gateway Detected on Private Interface!${reset}"
+ echo "${red}Private subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ 2)
+ echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+ sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
+ echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+ node_default_gw=${interface_ip_arr[0]}
+ public_gateway=$host_default_gw
+ ;;
+ 3)
+ echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+ echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ *)
+ echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
+ exit 1
+ ;;
+ esac
else
- next_storage_ip=${interface_ip_arr[3]}
- storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
- fi
-
- ##replace public_subnet param
- public_subnet=$public_subnet'\'$public_short_subnet_mask
- sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_subnet param
- private_subnet=$private_subnet'\'$private_short_subnet_mask
- sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
-
- ##replace public_dns param to be foreman server
- sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
-
- ##replace public_gateway
- if [ -z "$public_gateway" ]; then
- ##if unset then we assume its the first IP in the public subnet
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_gateway=$(increment_subnet $public_subnet 1)
- fi
- sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
-
- ##we have to define an allocation range of the public subnet to give
- ##to neutron to use as floating IPs
- ##we should control this subnet, so this range should work .150-200
- ##but generally this is a bad idea and we are assuming at least a /24 subnet here
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_allocation_start=$(increment_subnet $public_subnet 150)
- public_allocation_end=$(increment_subnet $public_subnet 200)
-
- sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
- sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
-
-else
- printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2
- exit 1
-fi
-
-echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
-
-fi
-
-if [ $virtual ]; then
- echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
- sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
-fi
-
-echo "${blue}Starting Vagrant! ${reset}"
-
-##stand up vagrant
-if ! vagrant up; then
- printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
- exit 1
-else
- echo "${blue}Foreman VM is up! ${reset}"
-fi
-
-if [ $virtual ]; then
-
-##Bring up VM nodes
-echo "${blue}Setting VMs up... ${reset}"
-nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
-##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
-##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
-##3 static controllers
-compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
-controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
-nodes=${controller_nodes}${compute_nodes}
-
-for node in ${nodes}; do
- cd /tmp
-
- ##remove VM nodes incase it wasn't cleaned up
- rm -rf /tmp/$node
-
- ##clone bgs vagrant
- ##will change this to be opnfv repo when commit is done
- if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
+ ##assumes a /24 netmask
+ defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
+ firstip=.1
+ defaultgw=$defaultgw$firstip
+ echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
+ node_default_gw=$defaultgw
fi
- cd $node
-
if [ $base_config ]; then
if ! cp -f $base_config opnfv_ksgen_settings.yml; then
echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
@@ -581,114 +725,503 @@ for node in ${nodes}; do
fi
fi
- ##parse yaml into variables
- eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
- ##find node type
- node_type=config_nodes_${node}_type
- node_type=$(eval echo \$$node_type)
-
- ##find number of interfaces with ip and substitute in VagrantFile
- output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
- if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
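+ ##the sed chain above extracts the node names from the nodes: section of the settings file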
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ echo "${blue}Controller nodes found in settings: ${controller_nodes}${reset}"
+ my_controller_array=( $controller_nodes )
+ num_control_nodes=${#my_controller_array[@]}
+ if [ "$num_control_nodes" -ne 3 ]; then
+ if grep ha_flag opnfv_ksgen_settings.yml | grep -q true; then
+ echo "${red}Error: You must define exactly 3 control nodes when HA flag is true!${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}Number of Controller nodes detected: ${num_control_nodes}${reset}"
fi
+ if [ $no_parse ]; then
+ echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
- if_counter=0
- for interface in ${output}; do
-
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- case "${if_counter}" in
- 0)
- mac_string=config_nodes_${node}_mac_address
- mac_addr=$(eval echo \$$mac_string)
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find mac_address for $node! ${reset}"
- exit 1
- fi
- ;;
- 1)
- if [ "$node_type" == "controller" ]; then
- mac_string=config_nodes_${node}_private_mac
- mac_addr=$(eval echo \$$mac_string)
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find private_mac for $node! ${reset}"
- exit 1
- fi
- else
- ##generate random mac
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- fi
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- *)
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- esac
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
- ((if_counter++))
- done
-
- ##now remove interface config in Vagrantfile for 1 node
- ##if 1, 3, or 4 interfaces set deployment type
- ##if 2 interfaces remove 2nd interface and set deployment type
- if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
- elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
- elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
else
- deployment_type="multi_network"
- fi
- ##modify provisioning to do puppet install, config, and foreman check-in
- ##substitute host_name and dns_server in the provisioning script
- host_string=config_nodes_${node}_hostname
- host_name=$(eval echo \$$host_string)
- sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
- ##dns server should be the foreman server
- sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
+ ##Edit the ksgen settings appropriately
+ ##ksgen settings will be stored in /vagrant on the vagrant machine
+ ##if single node deployment all the variables will have the same ip
+ ##interface names will be enp0s3, enp0s8, enp0s9 in the chef/centos7 Vagrant box
+
+ sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+
+ ##replace private interface parameter
+ ##private interface will be of hosts, so we need to know the provisioned host interface name
+ ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
+ ##replace IP for parameters with next IP that will be given to controller
+ if [ "$deployment_type" == "single_network" ]; then
+ ##we also need to assign IP addresses to nodes
+ ##for single node, foreman is managing the single network, so we can't reserve them
+ ##single network deployments are currently unsupported
+ echo "${red}Single network type is currently unsupported. Please check your interface configuration. Exiting. ${reset}"
+ exit 1
+
+ elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ fi
+
+ sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+
+ ##get ip addresses for private network on controllers to make dhcp entries
+ ##required for controllers_ip_array global param
+ next_private_ip=${interface_ip_arr[1]}
+ type=_private
+ control_count=0
+ for node in controller1 controller2 controller3; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+ exit 1
+ fi
+ sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ done
+
+ next_public_ip=${interface_ip_arr[2]}
+ foreman_ip=$next_public_ip
+
+ ##if no dhcp, find all the Admin IPs for nodes in advance
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type=_admin
+ for node in ${nodes}; do
+ next_admin_ip=$(next_ip $next_admin_ip)
+ if [ ! "$next_admin_ip" ]; then
+ echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+ exit 1
+ else
+ admin_ip_arr[$node]=$next_admin_ip
+ sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ ##allocate node public IPs
+ for node in ${nodes}; do
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+ exit 1
+ else
+ public_ip_arr[$node]=$next_public_ip
+ fi
+ done
+ fi
+ fi
+ ##replace global param for controllers_ip_array
+ controller_ip_array=${controller_ip_array%?}
+ sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+
+ ##now replace all the VIP variables. admin//private can be the same IP
+ ##we have to use IP's here that won't be allocated to hosts at provisioning time
+ ##therefore we increment the ip by 10 to make sure we have a safe buffer
+ next_private_ip=$(increment_ip $next_private_ip 10)
+
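+ ##iterate via a here-string rather than a pipe so updates to next_private_ip
+ ##persist outside the loop (a pipe would run the loop in a subshell)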
+ private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$private_output" ]; then
+ while read -r line; do
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$private_output"
+ fi
+
+ ##replace odl_control_ip (non-HA only)
+ odl_control_ip=${controllers_ip_arr[0]}
+ sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace controller_ip (non-HA only)
+ sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace foreman site
+ sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+ ##replace public vips
+ ##no need to do this if no dhcp
+ if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ else
+ next_public_ip=$(increment_ip $next_public_ip 10)
+ fi
+
+ public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$public_output" ]; then
+ while read -r line; do
+ if echo $line | grep -q horizon_public_vip; then
+ horizon_public_vip=$next_public_ip
+ fi
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$public_output"
+ fi
+
+ ##replace public_network param
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace private_network param
+ private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
+ sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace storage_network
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ next_storage_ip=${interface_ip_arr[3]}
+ storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_subnet param
+ public_subnet=$public_subnet'\'$public_short_subnet_mask
+ sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ ##replace private_subnet param
+ private_subnet=$private_subnet'\'$private_short_subnet_mask
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+
+ ##replace public_dns param to be foreman server
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
+
+ ##replace public_gateway
+ if [ -z "$public_gateway" ]; then
+ ##if unset then we assume its the first IP in the public subnet
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ public_gateway=$(increment_subnet $public_subnet 1)
+ fi
+ sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
+
+ ##we have to define an allocation range of the public subnet to give
+ ##to neutron to use as floating IPs
+ ##if static ip range, then we take the difference of the end range and current ip
+ ## to be the allocation pool
+ ##if not static ip, we will use the last 20 IP from the subnet
+ ## note that this is not a really good idea because the subnet must be at least a /27 for this to work...
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ if [ ! -z "$static_ip_range" ]; then
+ begin_octet=$(echo $next_public_ip | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_diff=$((end_octet-begin_octet))
+ if [ $ip_diff -le 0 ]; then
+ echo "${red}ip range left for floating range is less than or equal to 0! $ipdiff ${reset}"
+ exit 1
+ else
+ public_allocation_start=$(next_ip $next_public_ip)
+ public_allocation_end=$static_ip_range_end
+ fi
+ else
+ last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+ public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+ public_allocation_end=${last_ip_subnet}
+ fi
+ echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
+
+ sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+ sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
+
+ else
+ printf '%s\n' "deploy.sh: Unknown network type: $deployment_type" >&2
+ exit 1
+ fi
- ## remove bootstrap and NAT provisioning
- sed -i '/nat_setup.sh/d' Vagrantfile
- sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
- ## modify default_gw to be node_default_gw
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ fi
+}
- ## modify VM memory to be 4gig
- sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+##Configure bootstrap.sh to use the virtual Khaleesi playbook
+##params: none
+##usage: configure_virtual()
+configure_virtual() {
+ if [ $virtual ]; then
+ echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+ sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+ fi
+}
- echo "${blue}Starting Vagrant Node $node! ${reset}"
+##Starts Foreman VM with Vagrant
+##params: none
+##usage: start_foreman()
+start_foreman() {
+ echo "${blue}Starting Vagrant! ${reset}"
##stand up vagrant
if ! vagrant up; then
- echo "${red} Unable to start $node ${reset}"
+ printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
exit 1
else
- echo "${blue} $node VM is up! ${reset}"
+ echo "${blue}Foreman VM is up! ${reset}"
fi
+}
-done
+##start the VM nodes if this is a virtual installation
+##this function does nothing if baremetal servers are being used
+##params: none
+##usage: start_virtual_nodes()
+start_virtual_nodes() {
+ if [ $virtual ]; then
+
+ ##Bring up VM nodes
+ echo "${blue}Setting VMs up... ${reset}"
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ ##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
+ ##this fix assumes more than I would like, but for now it should be OK as we always have
+ ##3 static controllers
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ controller_count=0
+ compute_wait_completed=false
+
+ for node in ${nodes}; do
+
+ ##remove VM node directories in case they weren't cleaned up
+ rm -rf $vm_dir/$node
+ rm -rf /tmp/genesis/
+
+ ##clone genesis and move into node folder
+ clone_bgs $vm_dir/$node
+
+ cd $vm_dir/$node
+
+ if [ $base_config ]; then
+ if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+ echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ exit 1
+ fi
+ fi
+
+ ##parse yaml into variables
+ eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
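+ ##parse_yaml flattens the settings YAML into shell variables prefixed with config_, e.g. config_nodes_<node>_type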
+ ##find node type
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+
+ ##make compute nodes wait until control nodes have had time to install
+ if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+ echo "${blue}Waiting ~23 minutes for Control nodes to install before continuing with Compute nodes...${reset}"
+ compute_wait_completed=true
+ sleep 1400
+ fi
+
+ ## Add Admin interface
+ mac_string=config_nodes_${node}_mac_address
+ mac_addr=$(eval echo \$$mac_string)
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find mac_address for $node! ${reset}"
+ exit 1
+ fi
+ this_admin_ip=${admin_ip_arr[$node]}
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+
+ ## Add private interface
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+ if [ $mac_addr == "" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac
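+ ##(fixed 00-60-2F prefix plus 3 random bytes from /dev/random)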
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ "$node_type" == "controller" ]; then
+ new_node_ip=${controllers_ip_arr[$controller_count]}
+ if [ ! "$new_node_ip" ]; then
+ echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}"
+ exit 1
+ fi
+ ((controller_count++))
+ else
+ next_private_ip=$(next_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "{red}ERROR: Could not find private ip for $node ${reset}"
+ exit 1
+ fi
+ new_node_ip=$next_private_ip
+ fi
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ ##replace host_ip in vm_nodes_provision with private ip
+ sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+ ##replace ping site
+ if [ ! -z "$ping_site" ]; then
+ sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh
+ fi
+
+ ##find public ip info and add public interface
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ this_public_ip=${public_ip_arr[$node]}
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+ else
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ fi
+ remove_vagrant_network eth_replace3
+
+ ##modify provisioning to do puppet install, config, and foreman check-in
+ ##substitute host_name and dns_server in the provisioning script
+ host_string=config_nodes_${node}_hostname
+ host_name=$(eval echo \$$host_string)
+ sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+ ##dns server should be the foreman server
+ sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ ## remove bootstrap and NAT provisioning
+ sed -i '/nat_setup.sh/d' Vagrantfile
+ sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ ## modify default_gw to be node_default_gw
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ ## modify VM memory to be 4gig
+ ##if node type is controller
+ if [ "$node_type" == "controller" ]; then
+ sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+ fi
+ echo "${blue}Starting Vagrant Node $node! ${reset}"
+ ##stand up vagrant
+ if ! vagrant up; then
+ echo "${red} Unable to start $node ${reset}"
+ exit 1
+ else
+ echo "${blue} $node VM is up! ${reset}"
+ fi
+ done
+ echo "${blue} All VMs are UP! ${reset}"
+ echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+ ##check puppet is complete
+ ##ssh into foreman server, run check to verify puppet is complete
+ pushd $vm_dir/foreman_vm
+ if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+ echo "${red} Failed to validate puppet completion on nodes ${reset}"
+ exit 1
+ else
+ echo "{$blue} Puppet complete on all nodes! ${reset}"
+ fi
+ popd
+ ##add routes back to nodes
+ for node in ${nodes}; do
+ pushd $vm_dir/$node
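+ ##10.0.2.2 is the gateway of VirtualBox's NAT interface; swap it for the host's real default gateway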
+ if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+ echo "${blue} Adding public route back to $node! ${reset}"
+ vagrant ssh -c "route add default gw $this_default_gw"
+ vagrant ssh -c "route delete default gw 10.0.2.2"
+ fi
+ popd
+ done
+ if [ ! -z "$horizon_public_vip" ]; then
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+ else
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${odl_control_ip} ${reset}"
+ fi
+ fi
+}
- echo "${blue} All VMs are UP! ${reset}"
+##check to make sure nodes are powered off
+##this function does nothing if virtual
+##params: none
+##usage: check_baremetal_nodes()
+check_baremetal_nodes() {
+ if [ $virtual ]; then
+ echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}"
+ else
+ echo "${blue}Checking Baremetal nodes power state... ${reset}"
+ if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ echo "${blue}Installing ipmitool...${reset}"
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Failed to install ipmitool!${reset}"
+ exit 1
+ fi
+ fi
+
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+
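+ ###query each BMC over IPMI lanplus and parse the System Power field of chassis status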
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \
+ | grep "System Power" | cut -d ':' -f2 | tr -d [:blank:]`
+ if [ "$ipmi_output" == "on" ]; then
+ echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}"
+ echo "${red}Please run clean.sh before running deploy! ${reset}"
+ exit 1
+ elif [ "$ipmi_output" == "off" ]; then
+ echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}"
+ else
+ echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}"
+ fi
+ done
+ else
+ echo "${red}base_config was not provided for a baremetal install! Exiting${reset}"
+ exit 1
+ fi
+ fi
+}
+
+##END FUNCTIONS
+
+main() {
+ parse_cmdline "$@"
+ disable_selinux
+ check_baremetal_nodes
+ install_EPEL
+ install_vbox
+ install_ansible
+ install_vagrant
+ clean_tmp
+ verify_vm_dir
+ clone_bgs $vm_dir/foreman_vm
+ configure_network
+ configure_virtual
+ start_foreman
+ start_virtual_nodes
+}
-fi
+main "$@"
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
index 21840dd..b41a41b 100644
--- a/foreman/ci/opnfv_ksgen_settings.yml
+++ b/foreman/ci/opnfv_ksgen_settings.yml
@@ -44,6 +44,7 @@ global_params:
deployment_type:
network_type: multi_network
default_gw:
+no_dhcp: false
foreman:
seed_values:
- { name: heat_cfn, oldvalue: true, newvalue: false }
@@ -110,6 +111,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AB"
bmc_user: root
bmc_pass: root
+ admin_ip: compute_admin
ansible_ssh_pass: "Op3nStack"
admin_password: ""
groups:
@@ -130,6 +132,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AC"
bmc_user: root
bmc_pass: root
+ admin_ip: controller1_admin
private_ip: controller1_private
private_mac: "10:23:45:67:87:AC"
ansible_ssh_pass: "Op3nStack"
@@ -152,6 +155,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AD"
bmc_user: root
bmc_pass: root
+ admin_ip: controller2_admin
private_ip: controller2_private
private_mac: "10:23:45:67:87:AD"
ansible_ssh_pass: "Op3nStack"
@@ -174,6 +178,7 @@ nodes:
bmc_mac: "10:23:45:67:88:AE"
bmc_user: root
bmc_pass: root
+ admin_ip: controller3_admin
private_ip: controller3_private
private_mac: "10:23:45:67:87:AE"
ansible_ssh_pass: "Op3nStack"
diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
new file mode 100644
index 0000000..79db257
--- /dev/null
+++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
@@ -0,0 +1,264 @@
+global_params:
+ admin_email: opnfv@opnfv.com
+ ha_flag: "false"
+ odl_flag: "true"
+ odl_control_ip:
+ private_network:
+ storage_network:
+ public_network:
+ private_subnet:
+ deployment_type:
+ controller_ip:
+network_type: multi_network
+default_gw:
+no_dhcp: false
+foreman:
+ seed_values:
+ - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+ name: puppet
+ short_name: pupt
+ network:
+ auto_assign_floating_ip: false
+ variant:
+ short_name: m2vx
+ plugin:
+ name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+ repo:
+ Fedora:
+ '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+ '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+ RedHat:
+ '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+ use_virtual_env: false
+ public_allocation_end: 10.2.84.71
+ skip:
+ files: null
+ tests: null
+ public_allocation_start: 10.2.84.51
+ physnet: physnet1
+ use_custom_repo: false
+ public_subnet_cidr: 10.2.84.0/24
+ public_subnet_gateway: 10.2.84.1
+ additional_default_settings:
+ - section: compute
+ option: flavor_ref
+ value: 1
+ cirros_image_file: cirros-0.3.1-x86_64-disk.img
+ setup_method: tempest/rpm
+ test_name: all
+ rdo:
+ version: juno
+ rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ rpm:
+ version: 20141201
+ dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+ node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+ anchors:
+ - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+ compute:
+ name: oscompute11.opnfv.com
+ hostname: oscompute11.opnfv.com
+ short_name: oscompute11
+ type: compute
+ host_type: baremetal
+ hostgroup: Compute
+ mac_address: "10:23:45:67:89:AB"
+ bmc_ip: 10.4.17.2
+ bmc_mac: "10:23:45:67:88:AB"
+ bmc_user: root
+ bmc_pass: root
+ admin_ip: compute_admin
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: ""
+ groups:
+ - compute
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+ controller1:
+ name: oscontroller1.opnfv.com
+ hostname: oscontroller1.opnfv.com
+ short_name: oscontroller1
+ type: controller
+ host_type: baremetal
+ hostgroup: Controller_Network_ODL
+ mac_address: "10:23:45:67:89:AC"
+ bmc_ip: 10.4.17.3
+ bmc_mac: "10:23:45:67:88:AC"
+ bmc_user: root
+ bmc_pass: root
+ private_ip: controller1_private
+ admin_ip: controller1_admin
+ private_mac: "10:23:45:67:87:AC"
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: "octopus"
+ groups:
+ - controller
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+workaround_mysql_centos7: true
+distro:
+ name: centos
+ centos:
+ '7.0':
+ repos: []
+ short_name: c
+ short_version: 70
+ version: '7.0'
+ rhel:
+ '7.0':
+ kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+ repos:
+ - section: rhel7-server-rpms
+ name: Packages for RHEL 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-update-rpms
+ name: Update Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+ gpgcheck: 0
+ - section: rhel-7-server-extras-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+ gpgcheck: 0
+ '6.5':
+ kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+ repos:
+ - section: rhel6.5-server-rpms
+ name: Packages for RHEL 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+ gpgcheck: 0
+ - section: rhel-6.5-server-update-rpms
+ name: Update Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+ gpgcheck: 0
+ - section: rhel-6.5-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+ gpgcheck: 0
+ - section: rhel6.5-server-rpms-32bit
+ name: Packages for RHEL 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-update-rpms-32bit
+ name: Update Packages for Enterprise Linux 6.5 - i686
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-optional-rpms-32bit
+ name: Optional Packages for Enterprise Linux 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+ gpgcheck: 0
+ enabled: 1
+ subscription:
+ username: REPLACE_ME
+ password: HWj8TE28Qi0eP2c
+ pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+ config:
+ selinux: permissive
+ ntp_server: 0.pool.ntp.org
+ dns_servers:
+ - 10.4.1.1
+ - 10.4.0.2
+ reboot_delay: 1
+ initial_boot_timeout: 180
+node:
+ prefix:
+ - rdo
+ - pupt
+ - ffqiotcxz1
+ - null
+product:
+ repo_type: production
+ name: rdo
+ short_name: rdo
+ rpm:
+ CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ short_version: ju
+ repo:
+ production:
+ CentOS:
+ 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ Fedora:
+ '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+ '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+ RedHat:
+ '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ version: juno
+ config:
+ enable_epel: y
+ short_repo: prod
+tester:
+ name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+ verbosity: 1
+ archive:
+ - '{{ tempest.dir }}/etc/tempest.conf'
+ - '{{ tempest.dir }}/etc/tempest.conf.sample'
+ - '{{ tempest.dir }}/*.log'
+ - '{{ tempest.dir }}/*.xml'
+ - /root/
+ - /var/log/
+ - /etc/nova
+ - /etc/ceilometer
+ - /etc/cinder
+ - /etc/glance
+ - /etc/keystone
+ - /etc/neutron
+ - /etc/ntp
+ - /etc/puppet
+ - /etc/qpid
+ - /etc/qpidd.conf
+ - /root
+ - /etc/yum.repos.d
+ - /etc/yum.repos.d
+topology:
+ name: multinode
+ short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+ debug: 0
+ info: 1
+ warning: 2
+ warn: 2
+ errors: 3
+provisioner:
+ username: admin
+ network:
+ type: nova
+ name: external
+ skip: skip_provision
+ foreman_url: https://10.2.84.2/api/v2/
+ password: octopus
+ type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+ enabled: true
+
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
index 9e3d053..9b3a4d4 100644
--- a/foreman/ci/reload_playbook.yml
+++ b/foreman/ci/reload_playbook.yml
@@ -14,3 +14,4 @@
delay=60
timeout=180
sudo: false
+ - pause: minutes=1
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
index d0bba64..e64c0ad 100755
--- a/foreman/ci/vm_nodes_provision.sh
+++ b/foreman/ci/vm_nodes_provision.sh
@@ -18,6 +18,7 @@ green=`tput setaf 2`
host_name=REPLACE
dns_server=REPLACE
+host_ip=REPLACE
##END VARS
##set hostname
@@ -31,27 +32,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
systemctl restart NetworkManager
fi
-if ! ping www.google.com -c 5; then
+##modify /etc/resolv.conf to point to foreman
+echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}"
+cat > /etc/resolv.conf << EOF
+search ci.com opnfv.com
+nameserver $dns_server
+nameserver 8.8.8.8
+
+EOF
+
+##modify /etc/hosts to add own IP for rabbitmq workaround
+host_short_name=`echo $host_name | cut -d . -f 1`
+echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}"
+cat > /etc/hosts << EOF
+$host_ip $host_short_name $host_name
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+EOF
+
+if ! ping www.google.com -c 5; then
echo "${red} No internet connection, check your route and DNS setup ${reset}"
exit 1
fi
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
+##install EPEL
+if ! yum repolist | grep "epel/"; then
+ if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to configure EPEL repo' >&2
+ exit 1
+ fi
+else
+ printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.'
+fi
-# Update device-mapper-libs, needed for libvirtd on compute nodes
-# Major version is pinned to force some consistency for Arno
-if ! yum -y upgrade device-mapper-libs-1*; then
+##install device-mapper-libs
+##needed for libvirtd on compute nodes
+if ! yum -y upgrade device-mapper-libs; then
echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
fi
-# Install other required packages
-# Major version is pinned to force some consistency for Arno
echo "${blue} Installing Puppet ${reset}"
-if ! yum install -y puppet-3*; then
- printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
- exit 1
+##install puppet
+if ! yum list installed | grep -i puppet; then
+ if ! yum -y install puppet; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2
+ exit 1
+ fi
fi
echo "${blue} Configuring puppet ${reset}"
diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst
index 2ac872d..20ea983 100644
--- a/foreman/docs/src/installation-instructions.rst
+++ b/foreman/docs/src/installation-instructions.rst
@@ -213,8 +213,6 @@ Follow the steps below to execute:
4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When complete you will see "Finished: SUCCESS"
-.. _setup_verify:
-
Verifying the Setup
-------------------
@@ -236,9 +234,7 @@ Now that the installer has finished it is a good idea to check and make sure thi
7. Now go to your web browser and insert the Horizon public VIP. The login will be "admin"/"octopus".
-8. You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
-
-.. _openstack_verify:
+8. You are now able to follow the `OpenStack Verification`_ section.
OpenStack Verification
----------------------
@@ -316,14 +312,14 @@ Follow the steps below to execute:
Verifying the Setup - VMs
-------------------------
-Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+Follow the instructions in the `Verifying the Setup`_ section.
Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
OpenStack Verification - VMs
----------------------------
-Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+Follow the steps in `OpenStack Verification`_ section.
Frequently Asked Questions
==========================
@@ -353,7 +349,11 @@ OpenStack
OpenDaylight
------------
-`OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
+Upstream OpenDaylight provides `a number of packaging and deployment options <https://wiki.opendaylight.org/view/Deployment>`_ meant for consumption by downstream projects like OPNFV.
+
+Currently, OPNFV Foreman uses `OpenDaylight's Puppet module <https://github.com/dfarrell07/puppet-opendaylight>`_, which in turn depends on `OpenDaylight's RPM <https://copr.fedoraproject.org/coprs/dfarrell07/OpenDaylight/>`_.
+
+Note that the RPM is currently hosted on Copr, but `will soon <https://trello.com/c/qseotfgL/171-host-odl-rpm-on-odl-infra>`_ be migrated to OpenDaylight's infrastructure and/or the new CentOS NFV SIG.
Foreman
-------
diff --git a/fuel/build/Makefile b/fuel/build/Makefile
index d3afe83..4acf5e2 100644
--- a/fuel/build/Makefile
+++ b/fuel/build/Makefile
@@ -42,8 +42,6 @@ SUBDIRS += f_osnaily
SUBDIRS += f_l23network
SUBDIRS += f_resolvconf
SUBDIRS += f_ntp
-SUBDIRS += f_odl_docker
-#SUBDIRS += f_odl
# f_example is only an example of how to generate a .deb package and
# should not be enabled in official builds.
@@ -63,7 +61,6 @@ all:
@echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE)
@echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE)
@echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE)
- $(MAKE) -C f_odl_docker -f Makefile all
@make -C docker
@docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso
@@ -128,10 +125,6 @@ patch-packages:
prepare:
#$(MAKE) -C opendaylight -f Makefile setup
-.PHONY: odl
-odl:
- #$(MAKE) -C opendaylight -f Makefile
-
.PHONY: build-clean $(SUBCLEAN)
build-clean: $(SUBCLEAN)
$(MAKE) -C patch-packages -f Makefile clean
@@ -146,7 +139,6 @@ clean: clean-cache prepare $(SUBCLEAN)
@rm -f *.iso
@rm -Rf release
@rm -Rf newiso
- @rm -f f_odl
@rm -f $(NEWISO)
@rm -f $(BUILD_BASE)/.versions
@@ -156,6 +148,6 @@ $(SUBCLEAN): %.clean:
# Todo: Make things smarter - we shouldn't need to clean everything
# betwen make invocations.
.PHONY: iso
-iso: prepare build-clean odl $(ISOCACHE) $(SUBDIRS) patch-packages
+iso: prepare build-clean $(ISOCACHE) $(SUBDIRS) patch-packages
install/install.sh iso $(ISOCACHE) $(NEWISO) $(PRODNO) $(REVSTATE)
@printf "\n\nProduct ISO is $(NEWISO)\n\n"
diff --git a/fuel/build/docker/ubuntu-builder/Dockerfile b/fuel/build/docker/ubuntu-builder/Dockerfile
index b4e1b4e..76fe401 100644
--- a/fuel/build/docker/ubuntu-builder/Dockerfile
+++ b/fuel/build/docker/ubuntu-builder/Dockerfile
@@ -24,8 +24,5 @@ ADD ./setcontext /root/setcontext
RUN chmod +x /root/setcontext
ADD ./enable_dockerx2 /root/enable_dockerx2
RUN chmod +x /root/enable_dockerx2
-ADD ./install.sh /root/install.sh
-RUN chmod +x /root/install.sh
-RUN /root/install.sh
VOLUME /var/lib/docker
diff --git a/fuel/build/docker/ubuntu-builder/install.sh b/fuel/build/docker/ubuntu-builder/install.sh
deleted file mode 100755
index df1af72..0000000
--- a/fuel/build/docker/ubuntu-builder/install.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-#Install Oracle Java 7 jdk
-echo "Installing JAVA 7"
-apt-get update
-add-apt-repository ppa:webupd8team/java -y
-apt-get update
-echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections
-apt-get install oracle-java7-installer -y
-
-#Install Maven 3
-echo deb http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
-echo deb-src http://ppa.launchpad.net/natecarlson/maven3/ubuntu precise main >> /etc/apt/sources.list
-apt-get update || exit 1
-sudo apt-get install -y --force-yes maven3 || exit 1
-ln -s /usr/share/maven3/bin/mvn /usr/bin/mvn
diff --git a/fuel/build/f_odl_docker/Makefile b/fuel/build/f_odl_docker/Makefile
deleted file mode 100755
index 6135e71..0000000
--- a/fuel/build/f_odl_docker/Makefile
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-TOP := $(shell pwd)
-BUILDTAG := loving_daniel
-
-# Edit this to match the GENESIS / OPNFV in your environment
-export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv
-include ../config.mk
-
-.PHONY: all
-all:
- @mkdir -p puppet/modules/opnfv/odl_docker
- @rm -rf tmp
- @mkdir -p tmp
- @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile tmp/.
- @docker build -t ${BUILDTAG} tmp/dockerfile/.
- @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/odl_docker_image.tar
- @wget ${DOCKER_REPO}/${DOCKER_TAG} -O puppet/modules/opnfv/odl_docker/docker-latest
- @echo "OPFNV_PUPPET is: ${OPNFV_PUPPET}"
- @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/dockerfile/container_scripts puppet/modules/opnfv
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
-
-.PHONY: build-clean
-build-clean:
- @rm -rf tmp
- @rm -rf release
- @rm -rf puppet/modules/opnfv/odl_docker/odl_docker_image.tar
- @rm -rf puppet/modules/opnfv/odl_docker/docker-latest
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:
- # Fetch PP from OPNFV Common
- @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST}
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
diff --git a/fuel/build/f_odl_docker/dockerfile/Dockerfile b/fuel/build/f_odl_docker/dockerfile/Dockerfile
deleted file mode 100755
index e3c7ee5..0000000
--- a/fuel/build/f_odl_docker/dockerfile/Dockerfile
+++ /dev/null
@@ -1,72 +0,0 @@
-####################################################################
-#
-# Dockerfile to build a ODL (Karaf) Docker Container
-#
-# Copyright daniel.smith@ericsson.com
-# License: Apache GPL
-#
-####################################################################
-
-
-#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff
-FROM ubuntu:12.04
-
-# Maintainer Info
-MAINTAINER Daniel Smith
-
-#Run apt-get update one start just to check for updates when building
-RUN echo "Updating APT"
-RUN apt-get update
-RUN echo "Adding wget"
-RUN apt-get install -y wget
-RUN apt-get install -y net-tools
-RUN apt-get install -y openjdk-7-jre
-RUN apt-get install -y openjdk-7-jdk
-RUN apt-get install -y openssh-server
-RUN apt-get install -y vim
-RUN apt-get install -y expect
-RUN apt-get install -y daemontools
-RUN mkdir -p /opt/odl_source
-RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc'
-
-
-#Now lets got and fetch the ODL distribution
-RUN echo "Fetching ODL"
-RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz
-
-RUN echo "Untarring ODL inplace"
-RUN mkdir -p /opt/odl
-RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl
-
-RUN echo "Installing DLUX and other features into ODL"
-COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh
-COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh
-RUN chmod 777 /etc/init.d/start_odl_docker.sh
-RUN chmod 777 /etc/init.d/speak.sh
-
-
-# Expose the ports
-# PORTS FOR BASE SYSTEM AND DLUX
-EXPOSE 8101
-EXPOSE 6633
-EXPOSE 1099
-EXPOSE 43506
-EXPOSE 8181
-EXPOSE 8185
-EXPOSE 9000
-EXPOSE 39378
-EXPOSE 33714
-EXPOSE 44444
-EXPOSE 6653
-
-# PORTS FOR OVSDB AND ODL CONTROL
-EXPOSE 12001
-EXPOSE 6640
-EXPOSE 8080
-EXPOSE 7800
-EXPOSE 55130
-EXPOSE 52150
-EXPOSE 36826
-
-# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable
-CMD ["/etc/init.d/start_odl_docker.sh"]
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh
deleted file mode 100755
index 3e5d0b2..0000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/check_feature.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/expect
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:list | grep -i odl-restconf\r"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh
deleted file mode 100755
index 3ba07a8..0000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/speak.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/expect
-# Ericsson Research Canada
-#
-# Author: Daniel Smith <daniel.smith@ericsson.com>
-#
-# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB
-#
-# NOTE: THIS WILL BE REPLACED WITH A PROGRAMMATIC METHOD SHORTLY
-# DEPRECATED AFTER ARNO
-
-spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client
-expect "root>"
-send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core"
-send "\r\r\r"
-expect "root>"
-send "logout\r"
-
diff --git a/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
deleted file mode 100755
index 1c72dda..0000000
--- a/fuel/build/f_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-# Ericsson Research Canada
-#
-# Author: Daniel Smith <daniel.smith@ericsson.com>
-#
-# Start up script for calling karaf / ODL inside a docker container.
-#
-# This script also calls a couple of expect scripts to load the feature set that we want
-
-
-#ENV
-export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
-
-#MAIN
-echo "Starting up the da Sheilds..."
-/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server &
-echo "Sleeping 5 bad hack"
-sleep 10
-echo "should see stuff listening now"
-netstat -na
-echo " should see proess running for karaf"
-ps -efa
-echo " Starting the packages we want"
-/etc/init.d/speak.sh
-echo "Printout the status - if its right, you should see 8181 appear now"
-netstat -na
-ps -efa
-
-
-
-## This loop keeps the container running; it prints the karaf status to the docker logs every minute
-## Cheap - but effective
-while true;
-do
- echo "Checking status of ODL:"
- /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status
- sleep 60
-done
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
deleted file mode 100644
index c286127..0000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp
+++ /dev/null
@@ -1,77 +0,0 @@
-class opnfv::odl_docker
-{
- case $::fuel_settings['role'] {
- /controller/: {
-
- file { '/opt':
- ensure => 'directory',
- }
-
- file { '/opt/opnfv':
- ensure => 'directory',
- owner => 'root',
- group => 'root',
- mode => '0777',
- }
-
- file { '/opt/opnfv/odl':
- ensure => 'directory',
- }
-
- file { '/opt/opnfv/odl/odl_docker_image.tar':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar',
- mode => '0750',
- }
-
- file { '/opt/opnfv/odl/docker-latest':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/odl_docker/docker-latest',
- mode => '0750',
- }
-
- file { '/opt/opnfv/odl/start_odl_container.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh',
- mode => '0750',
- }
- file { '/opt/opnfv/odl/stage_odl.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh',
- mode => '0750',
- }
- file { '/opt/opnfv/odl/config_net_odl.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh',
- mode => '0750',
- }
- file { '/opt/opnfv/odl/change.sh':
- ensure => present,
- source => '/etc/puppet/modules/opnfv/scripts/change.sh',
- mode => '0750',
- }
-
-
- # fix failed to find the cgroup root issue
- # https://github.com/docker/docker/issues/8791
- case $::operatingsystem {
- 'ubuntu': {
- package {'cgroup-lite':
- ensure => present,
- }
-
- service {'cgroup-lite':
- ensure => running,
- enable => true,
- require => Package['cgroup-lite'],
- }
- }
- 'centos': {
- package {'docker-io':
- ensure => latest,
- }
- }
- }
- }
- }
-}
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
deleted file mode 100644
index f7f3d6e..0000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/change.sh
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/bin/bash
-# script to remove bridges and reset networking for ODL
-
-
-#VARS
-MODE=0
-DNS=8.8.8.8
-
-#ENV
-source ~/openrc
-
-# GET IPS for that node
-function get_ips {
- BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'`
- BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'`
- BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'`
- BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'`
- DEF_NETMASK=255.255.255.0
- DEF_GW=172.30.9.1
-}
-
-function backup_ifcfg {
- echo " backing up "
- mkdir -p /etc/network/ifcfg_backup
- mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/.
- mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/.
- rm -rf /etc/network/interfaces.d/ifcfg-eth1.300
- rm -rf /etc/network/interfaces.d/ifcfg-eth1.301
- rm -rf /etc/network/interfaces.d/ifcfg-eth1
- rm -rf /etc/network/interfaces.d/ifcfg-eth0
-
-}
-
-
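-# The create_ifcfg_* functions below re-create each bridge's address on a plain
-# interface: br-mgmt -> eth1.300, br-storage -> eth1.301, br-fw-admin -> eth1,
-# and (on controllers) br-ex -> eth0.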
-function create_ifcfg_br_mgmt {
- echo "migrating br_mgmt"
- echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo " address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300
-}
-
-function create_ifcfg_br_storage {
- echo "migration br_storage"
- echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo " address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301
-}
-
-function create_ifcfg_br_fw_admin {
- echo " migratinng br_fw_admin"
- echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1
- echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1
- echo " address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1
-}
-
-function create_ifcfg_eth0 {
- echo "migratinng br-ex to eth0 - temporarily"
- echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0
- echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0
- echo " gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0
-}
-
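-# Role detection heuristic: only controllers carry the glance image store on disk.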
-function set_mode {
- if [ -d "/var/lib/glance/images" ]
- then
- echo " controller "
- MODE=0
- else
- echo " compute "
- MODE=1
- fi
-}
-
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function start_ovs {
- echo "Starting OVS"
- service openvswitch-switch start
- ovs-vsctl show
-}
-
-
-function clean_ovs {
- echo "cleaning OVS DB"
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- echo "restarting OVS - you should see Nothing there"
- start_ovs
-}
-
-
-
-function reboot_me {
- reboot
-}
-
-function allow_challenge {
- sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config
- service ssh restart
-}
-
-function clean_neutron {
- subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
- routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` )
-
- #display all elements
- echo "SUBNETS: ${subnets[@]} "
- echo "NETWORKS: ${networks[@]} "
- echo "PORTS: ${ports[@]} "
- echo "ROUTERS: ${routers[@]} "
-
-
- # get port and subnet for each router
- for i in "${routers[@]}"
- do
- routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id | sed '/^$/d' `)
- routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed | sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//' -e 's/"$//' `)
- done
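- # NOTE: routerport/routersnet are overwritten on every pass, so only the last
- # router's values survive; the interface-delete below also assumes routers[0].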
-
- echo "ROUTER PORTS: ${routerport[@]} "
- echo "ROUTER SUBNET: ${routersnet[@]} "
-
- #remove router subnets
- echo "router-interface-delete"
- for i in "${routersnet[@]}"
- do
- neutron router-interface-delete ${routers[0]} $i
- done
-
- #remove subnets
- echo "subnet-delete"
- for i in "${subnets[@]}"
- do
- neutron subnet-delete $i
- done
-
- #remove nets
- echo "net-delete"
- for i in "${networks[@]}"
- do
- neutron net-delete $i
- done
-
- #remove routers
- echo "router-delete"
- for i in "${routers[@]}"
- do
- neutron router-delete $i
- done
-
- #remove ports
- echo "port-delete"
- for i in "${ports[@]}"
- do
- neutron port-delete $i
- done
-
- #remove subnets
- echo "subnet-delete second pass"
- for i in "${subnets[@]}"
- do
- neutron subnet-delete $i
- done
-
-}
-
-function set_dns {
- sed -i -e "s/nameserver 10.20.0.2/nameserver $DNS/g" /etc/resolv.conf
-}
-
-
-#OUTPUT
-
-function check {
- echo $BR_MGMT
- echo $BR_STORAGE
- echo $BR_FW_ADMIN
- echo $BR_EX
-}
-
-### MAIN
-
-
-set_mode
-backup_ifcfg
-get_ips
-create_ifcfg_br_mgmt
-create_ifcfg_br_storage
-create_ifcfg_br_fw_admin
-if [ $MODE == "0" ]
-then
- create_ifcfg_eth0
-fi
-allow_challenge
-clean_ovs
-check
-reboot_me
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
deleted file mode 100755
index 145da80..0000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/bin/bash
-#
-# Author: Daniel Smith (Ericsson)
-#
-# Script to update neutron configuration for OVSDB/ODL integration
-#
-# Usage - Set / pass CONTROL_HOST to your needs
-#
-### SET THIS VALUE TO MATCH YOUR SYSTEM
-CONTROL_HOST=192.168.0.2
-BR_EX_IP=172.30.9.70
-
-# ENV
-source ~/openrc
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF
- echo "[ml2_odl]" >> $ML2_CONF
- echo "password = admin" >> $ML2_CONF
- echo "username = admin" >> $ML2_CONF
- echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-function disable_agent {
- echo "Disabling Neutron Plugin Agents from running"
- service neutron-plugin-openvswitch-agent stop
- echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
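- # a healthy ODL northbound answers with a JSON body containing "networks"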
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if [ -d "/var/lib/glance/images" ]
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function start_ovs {
- echo "Starting OVS"
- service openvswitch-switch start
- ovs-vsctl show
-}
-
-
-function control_setup {
- echo "Modifying Controller"
- stop_neutron
- stop_ovs
- disable_agent
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
- mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- rm -rf /etc/openvswitch/.conf*
- service openvswitch-switch start
- ovs-vsctl add-br br-ex
- ovs-vsctl add-port br-ex eth0
- ovs-vsctl set interface br-ex type=external
- ifconfig br-ex ${BR_EX_IP}/24 up
- service neutron-server restart
-
- echo "setting up networks"
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
- ip link add link eth1 name br-storage type vlan id 301
- ip link add link eth1 name br-prv type vlan id 1000
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
-
- echo "Setting ODL Manager IP"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
-
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
- # BAD HACK - Should be parameterized - this is to catch up
- route add default gw 172.30.9.1
-
-}
-
-function clean_ovs {
- echo "cleaning OVS DB"
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/.
- rm -rf /etc/openvswitch/conf.db
- echo "restarting OVS - you should see Nothing there"
- start_ovs
-}
-
-function compute_setup {
- echo "Modifying Compute"
- echo "Disabling neutron openvswitch plugin"
- stop_neutron
- disable_agent
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp
- ip link add link eth1 name br-storage type vlan id 301
- ip link add link eth1 name br-prv type vlan id 1000
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp
-
- echo "set manager, and route for ODL controller"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
- route add 172.17.0.1 gw 192.168.0.2
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
deleted file mode 100755
index fa14b47..0000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-# Author: Daniel Smith (Ericsson)
-# Stages the ODL Controller
-# Inputs: odl_docker_image.tar
-# Usage: ./stage_odl.sh
-
-# ENVS
-source ~/.bashrc
-source ~/openrc
-
-LOCALPATH=/opt/opnfv/odl
-DOCKERBIN=docker-latest
-ODLIMGNAME=odl_docker_image.tar
-DNS=8.8.8.8
-HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
-
-
-
-# DEBUG ECHOS
-echo $LOCALPATH
-echo $DOCKERBIN
-echo $ODLIMGNAME
-echo $DNS
-echo $HOST_IP
-
-
-# Set DNS to something external and reset the default GW - ODL requires a connection to the internet
-sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf
-route delete default gw 10.20.0.2
-route add default gw 172.30.9.1
-
-# Start Docker daemon and in background
-echo "Starting Docker"
-chmod +x $LOCALPATH/$DOCKERBIN
-$LOCALPATH/$DOCKERBIN -d &
-#courtesy sleep for virtual env
-sleep 2
-
-# Import the ODL Container
-echo "Importing ODL Container"
-$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME
-
-# Start ODL, load DLUX and OVSDB modules
-echo "Removing any old install found - file not found is ok here"
-$LOCALPATH/$DOCKERBIN rm odl_docker
-echo "Starting up ODL controller in Daemon mode - no shell possible"
-$LOCALPATH/$DOCKERBIN run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:36826 -i -d -t loving_daniel
-
-# Following, you should see the docker ps listed and a port opened
-echo " you should reach ODL controller at http://HOST_IP:8181/dlux/index.html"
-$LOCALPATH/$DOCKERBINNAME ps -a
-netstat -lnt
-
-
diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
deleted file mode 100755
index 347ac74..0000000
--- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-# Ericsson Canada Inc.
-# Author: Daniel Smith
-#
-# A helper script to install and set up the ODL docker container on the controller
-#
-#
-# Inputs: odl_docker_image.tar
-#
-# Usage: ./start_odl_docker.sh
-echo "DEPRECATED - USE stage_odl.sh instead - this will be removed shortly once automated deployment is working - SR1"
-
-
-# ENVS
-source ~/.bashrc
-source ~/openrc
-
-# VARS
-
-# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source
-
-DEV=1
-
-# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host; if set to 0, random ports will be used. NOTE: this doesn't work for all web services (X port on host --> Y port in container),
-# especially for SSL/HTTPS cases. Be aware.
-
-MATCH_PORT=1
-
-LOCALPATH=/opt/opnfv/odl
-DOCKERBINNAME=docker-latest
-DOCKERIMAGENAME=odl_docker_image.tar
-DNS=8.8.8.8
-HOST_IP=`ifconfig br-fw-admin | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'`
-
-
-# Set this to "1" if you want to have your docker container startup into a shell
-
-
-ENABLE_SHELL=1
-
-
-echo " Fetching Docker "
-if [ "$DEV" -eq "1" ];
-# If testing Locally (on a control node) you can set DEV=1 to enable apt-get based install on the control node (not desired target, but good for testing).
-then
- echo "Dev Mode - Fetching from Internet";
- echo " this wont work in production builds";
- apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
- mkdir -p $LOCALPATH
- wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME
- wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb
- chmod 777 $LOCALPATH/$DOCKERBINNAME
- echo "done ";
-else
- echo "Using Binaries delivered from Puppet"
- echo "Starting Docker in Daemon mode"
- chmod +x $LOCALPATH/$DOCKERBINNAME
- $LOCALPATH/$DOCKERBINNAME -d &
-
- # wait until docker will be fully initialized
- # before any further action against just started docker
- sleep 5
-fi
-
-
-# We need to perform some cleanup of the Openstack Environment
-echo "TODO -- This should be automated in the Fuel deployment at some point"
-echo "However, the timing should come after basic tests are running, since this "
-echo " part will remove the subnet router association that is deployed automativally"
-echo " via fuel. Refer to the ODL + Openstack Integration Page "
-
-# Import the ODL container into docker
-
-echo "Importing ODL container into docker"
-$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME
-
-echo " starting up ODL - DLUX and Mapping Ports"
-if [ "$MATCH_PORT" -eq "1" ]
-then
- echo "Starting up Docker..."
- $LOCALPATH/$DOCKERBINNAME rm odl_docker
-fi
-
-if [ "$ENABLE_SHELL" -eq "1" ];
-then
- echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)"
- $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:36826 -i -t loving_daniel /bin/bash
-else
- echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)"
- $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel
- echo "should see the process listed here in docker ps -a"
- $LOCALPATH/$DOCKERBINNAME ps -a;
- echo "Match Port enabled, you can reach the DLUX login at: "
- echo "http://$HOST_IP:8181/dlux.index.html"
-fi
diff --git a/fuel/build/f_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_odl_docker/scripts/config_net_odl.sh
deleted file mode 100644
index d292acd..0000000
--- a/fuel/build/f_odl_docker/scripts/config_net_odl.sh
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/bin/bash
-#
-# Author: Daniel Smith (Ericsson)
-#
-# Script to update neutron configuration for OVSDB/ODL integration
-#
-# Usage - Set / pass CONTROL_HOST to your needs
-#
-CONTROL_HOST=172.30.9.70
-
-# ENV
-source ~/openrc
-
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- cat "[ml2_odl]" >> $ML2_CONF
- cat "password = admin" >> $ML2_CONF
- cat "username = admin" >> $ML2_CONF
- cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if ls -l /var/lib/glance/images
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function control_setup {
- echo "Modifying Controller"
- stop_neutron
- stop_ovs
- rm -rf /var/log/openvswitch/*
- mkdir -p /opt/opnfv/odl/ovs_back
- mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/.
- mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/.
- service openvswitch-switch start
- ovs-vsctl set-manager tcp:172.30.9.70:6640
- ovs-vsctl add-br br-eth0
- ovs-vsctl add-br br-ex
- ovs-vsctl add-port br-eth0 eth0
- ovs-vsctl add-port br-eth0 br-eth0--br-ex
- ovs-vsctl add-port br-ex br-ex--br-eth0
- ovs-vsctl set interface br-ex--br-eth0 type=patch
- ovs-vsctl set interface br-eth0--br-ex type=patch
- ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex
- ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0
- ifconfig br-ex 172.30.9.70/24 up
- service neutron-server restart
-
- echo "setting up networks"
- ip link add link eth1 name br-mgmt type vlan id 300
- ip link add link eth1 name br-storage type vlan id 301
- /etc/init.d/networking restart
-
-
- echo "Reset Neutron DB"
- #reset_neutrondb
- echo "Restarting Neutron Components"
- #restart_neutron
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
-}
-
-function compute_setup {
- echo "do compute stuff here"
- echo "stopping neutron openvswitch plugin"
- stop_neutron
- ip link add link eth1 name br-mgmt type vlan id 300
- ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24
- ip link add link eth1 name br-storage type vlan id 301
- ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24
- ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24
- echo "set manager, and route for ODL controller"
- ovs-vsctl set-manager tcp:192.168.0.2:6640
- route add 172.17.0.1 gw 192.168.0.2
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh
deleted file mode 100644
index 3b688ae..0000000
--- a/fuel/build/f_odl_docker/scripts/config_neutron_for_odl.sh
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/bash
-CONTROL_HOST=172.17.0.3
-
-# ENV
-source ~/openrc
-
-
-
-# VARS
-ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
-MODE=0
-
-
-# FUNCTIONS
-
-
-# Update ml2_conf.ini
-function update_ml2conf {
- echo "Backing up and modifying ml2_conf.ini"
- cp $ML2_CONF $ML2_CONF.bak
- sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
- sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
- cat "[ml2_odl]" >> $ML2_CONF
- cat "password = admin" >> $ML2_CONF
- cat "username = admin" >> $ML2_CONF
- cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
-}
-
-function reset_neutrondb {
- echo "Reseting DB"
- mysql -e "drop database if exists neutron_ml2;"
- mysql -e "create database neutron_ml2 character set utf8;"
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
- neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
-}
-
-function restart_neutron {
- echo "Restarting Neutron Server"
- service neutron-server restart
- echo "Should see Neutron runing now"
- service neutron-server status
- echo "Shouldnt be any nets, but should work (return empty)"
- neutron net-list
-}
-
-function stop_neutron {
- echo "Stopping Neutron / OVS components"
- service neutron-plugin-openvswitch-agent stop
- if [ $MODE == "0" ]
- then
- service neutron-server stop
- fi
-}
-
-
-
-function verify_ML2_working {
- echo "checking that we can talk via ML2 properly"
- curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
- if grep "network" /tmp/check_ml2
- then
- echo "Success - ML2 to ODL is working"
- else
- echo "im sorry Jim, but its dead"
- fi
-
-}
-
-
-function set_mode {
- if df -k | grep glance
- then
- echo "Controller Mode"
- MODE=0
- else
- echo "Compute Mode"
- MODE=1
- fi
-}
-
-function stop_ovs {
- echo "Stopping OpenVSwitch"
- service openvswitch-switch stop
-
-}
-
-function control_setup {
- echo "do control stuff here"
- echo "Reset Neutron DB"
- #reset_neutrondb
- echo "Restarting Neutron Components"
- #restart_neutron
- echo "Verifying ODL ML2 plugin is working"
- verify_ML2_working
-
-}
-
-function compute_setup {
- echo "do compute stuff here"
- stop_neutron
- verify_ML2_working
-}
-
-
-# MAIN
-echo "Starting to make call"
-#update_ml2conf
-echo "Check Mode"
-set_mode
-
-if [ $MODE == "0" ];
-then
- echo "Calling control setup"
- control_setup
-elif [ $MODE == "1" ];
-then
- echo "Calling compute setup"
- compute_setup
-
-else
- echo "Something is bad - call for help"
- exit
-fi
-
-
diff --git a/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh
deleted file mode 100755
index dd4fc9f..0000000
--- a/fuel/build/f_odl_docker/scripts/prep_nets_for_odl.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-# a "cheat" way to install docker on the controller
-# can only be used if you have a connection out to the internet
-
-# Usage: ./install_docker.sh <ip of default route to remove> <ip of default gw to add>
-
-OLDGW=$1
-NEWGW=$2
-IMAGEPATH=/opt/opnfv
-IMAGENAME=odl_docker_image.tar
-SOURCES=/etc/apt/sources.list
-
-
-if [ "$#" -ne 2]; then
- echo "Two args not provided, will not touch networking"
-else
-
- # Fix routes
- echo "Fixing routes"
- #DEBUG
- netstat -rn
-
- echo "delete old def route"
- route delete default gw $1
- echo "adding new def route"
- route add default gw $2
-
- echo " you should see a good nslookup now"
- nslookup www.google.ca
-fi
-
-
-if egrep "mirrors.txt" $SOURCES
-then
- echo "Sources was already updated, not touching"
-else
- echo "adding the closests mirrors and docker mirror to the mix"
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
- echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
- apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
- echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
-fi
-
-echo "Updating"
-apt-get update
-echo "Installing Docker"
-apt-get install -y lxc-docker
-
-echo "Loading ODL Docker Image"
-docker load -i $IMAGEPATH/$IMAGENAME
-
-
diff --git a/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh
deleted file mode 100644
index 42c9451..0000000
--- a/fuel/build/f_odl_docker/scripts/setup_ovs_for_odl.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-
-
-# OK - br-int has already been created,
-# so let's add a physical NIC to it.
-
-
-# First - Removal all the bridges you find
-
-for i in $(ovs-vsctl list-br)
-do
- if [ "$i" == "br-int" ];
- then
- echo "skipped br-int"
- elif [ "$i" == "br-prv"];
- then
- echo "skipped br-pr"
- else
- ovs-vsctl del-br $i
- fi
-done
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
index 54f1c86..fabe30a 100644
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
+++ b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
@@ -23,6 +23,4 @@ class opnfv {
include opnfv::ntp
# Make sure all added packages are installed
include opnfv::add_packages
- # Setup OpenDaylight
- include opnfv::odl_docker
}
diff --git a/fuel/build/opendaylight/Makefile b/fuel/build/opendaylight/Makefile
deleted file mode 100644
index bd2eeb5..0000000
--- a/fuel/build/opendaylight/Makefile
+++ /dev/null
@@ -1,102 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-############################################################################
-# BEGIN of variables to customize
-#
-SHELL = /bin/bash
-
-
-BUILD_DIR := $(shell pwd)
-GIT_DIR := /tmp
-export CONFIG_SPEC_SCRIPT
-export MAVEN_OPTS = -Xmx1024m -XX:MaxPermSize=512m
-MAINTAINER = "Main Tainer <main.tainer@example.org>"
-ODL_SHORT_NAME = odl
-ODL_VERSION = 0.1-1
-DEPEND = openjdk-8-jdk
-TARGET_BUILD_PATH="/tmp/controller/opendaylight/distribution/opendaylight-karaf/target/"
-MAVEN_SPEC = $(BUILD_DIR)/odl_maven/settings.xml
-
-#
-# END of variables to customize
-#############################################################################
-
-.PHONY: all
-all: odl
-
-############################################################################
-# BEGIN of Include definitions
-#
-include ../config.mk
-#
-# END Include definitions
-#############################################################################
-
-.PHONY: setup
-setup:
- rm -f "$(BUILD_BASE)/f_odl"
- ln -s "$(shell readlink -e $(BUILD_DIR))/f_odl" "$(shell readlink -e $(BUILD_BASE))/f_odl"
-
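-# Compare the upstream commit id for $(ODL_MAIN_TAG) with the one recorded in
-# $(VERSION_FILE); a mismatch means the cache must be rebuilt.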
-.PHONY: validate-cache
-validate-cache:
- @REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG)^{} | awk '{print $$(NF-1)}'); \
- if [ -z $$REMOTE_ID ] || [ $$REMOTE_ID = " " ]; \
- then \
- REMOTE_ID=$(shell git ls-remote $(ODL_MAIN_REPO) $(ODL_MAIN_TAG) | awk '{print $$(NF-1)}'); \
- fi; \
- if [ $$REMOTE_ID != $(shell cat $(VERSION_FILE) | grep odl | awk '{print $$NF}') ]; \
- then \
- echo "Cache does not match upstream OpenDaylight, cache must be rebuilt!"; \
- exit 1; \
- fi
-
-.PHONY: odl
-odl:
-
-ifeq ($(ODL_MAIN_REPO),)
- @echo "No config-spec target for ODL, nothing to build"
-else
-
-ifeq ($(shell if [ -e .odl-build.log ];then cat .odl-build.log; fi;),$(ODL_MAIN_TAG))
- @cd /tmp && git clone $(ODL_MAIN_REPO) && cd /tmp/controller && git checkout $(ODL_MAIN_TAG)
-
- @echo "ODL is up to date"
-else
- @if [ ! -d "/tmp/controller" ]; then\
- cd /tmp && git clone $(ODL_MAIN_REPO);\
- fi;
-
- @if [ "$(UNIT_TEST)" = "FALSE" ]; then\
- echo "Building ODL without unit test";\
- cd /tmp/controller &&\
- git checkout $(ODL_MAIN_TAG) &&\
- mvn -D maven.test.skip=true -gs $(MAVEN_SPEC) clean install;\
- else\
- echo "Building ODL with unit test";\
- cd /tmp/controller &&\
- git checkout $(ODL_MAIN_TAG) &&\
- mvn -gs $(MAVEN_SPEC) clean install;\
- fi;
-
- @echo "odl" `git -C /tmp/controller show | grep commit | head -1 | cut -d " " -f2` >> $(VERSION_FILE)
- @./make-odl-deb.sh -N $(ODL_SHORT_NAME)_`cd /tmp/controller; git rev-parse --short HEAD` -n $(ODL_SHORT_NAME) -v "$(ODL_VERSION)" -t "$(ODL_MAIN_TAG)" -m $(MAINTAINER) -d $(DEPEND) -p $(TARGET_BUILD_PATH)
- @echo $(ODL_MAIN_TAG) > .odl-build.log
-endif
-endif
-
-.PHONY: clean $(SUBCLEAN)
-clean: $(SUBCLEAN)
- @rm -Rf /tmp/controller
- @rm -f .odl-build.log
- @./make-odl-deb.sh -C
-
-.PHONY: release
-release:
diff --git a/fuel/build/opendaylight/README b/fuel/build/opendaylight/README
deleted file mode 100644
index 7aa392e..0000000
--- a/fuel/build/opendaylight/README
+++ /dev/null
@@ -1,52 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This directory builds the OpenDaylight debian package by cloning the
-opendaylight.org repo, building the odl tag specified in
-"fuel-build/config-spec", and constructing a debian package source tree under
-"f_odl", which is automatically linked into "fuel_build/." for further build processing.
-
-The opendaylight directory has the following structure:
-.
-+--------+-------------+-----------+
-| | | |
-| Makefile make-odl-deb.sh README
-| (this file)
-|
-+----------+----------+
- | |
- odl_maven/ f_odl/
-
-Makefile:
-Invoked by the git root Makefile, it clones the odl repo, checks out the
-tag/branch indicated in "fuel-build/config-spec", builds odl, and calls
-"make-odl-deb.sh" to create a debian package source tree.
-
-make-odl-deb.sh:
-Creates the odl debian package source tree in "f_odl" from the odl build
-results.
-
-odl_maven/:
-Contains needed control files for maven OpenDaylight build
-
-f_odl/:
-Contains buildscripts and the generated odl debian package source tree produced
-by the odl build (make-odl-deb.sh) which is later used by the root build system.
-
-NOTE on the controller/ git repo clone:
-The git controller repo clone contains all artifacts from the odl build; it only
-exists in /tmp inside the build docker container and is not visible anywhere on
-the build host.
-
-Note on build caching:
-The latest build results are cached and will not be rebuilt unless fuel-build/
-config-spec is changed with respect to the odl version, or unless make clean is applied.
-./.odl-build.log and ./.odl-build-history are used to keep adequate bookkeeping to
-track caching and needed cleanout.
diff --git a/fuel/build/opendaylight/f_odl/Makefile b/fuel/build/opendaylight/f_odl/Makefile
deleted file mode 100644
index f7ebd3e..0000000
--- a/fuel/build/opendaylight/f_odl/Makefile
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-include $(BUILD_BASE)/config.mk
-ODL_NAME_SHORT := odl
-PACKAGE := odl_$(shell cd /tmp/controller; git rev-parse --short HEAD)
-VERSION := 0.1-1
-DEB_NAME := $(PACKAGE)_$(VERSION)
-
-.PHONY: all
-all: release/pool/main/$(DEB_NAME).deb
-
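-# Build the .deb from the staged package tree, then sanity-check the result with lintian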
-release/pool/main/$(DEB_NAME).deb:
-ifeq ($(ODL_MAIN_REPO),)
- @echo "No config-spec target for ODL, nothing to build"
-else
- @mkdir -p tmp/src
- @mkdir -p release/pool/main
- @cp -rp package/$(DEB_NAME) tmp/src
- @gzip -f9 tmp/src/$(DEB_NAME)/usr/share/doc/$(ODL_NAME_SHORT)/changelog.Debian
- @fakeroot dpkg-deb --build tmp/src/$(DEB_NAME)
- @lintian tmp/src/$(DEB_NAME).deb
- @cp tmp/src/$(DEB_NAME).deb release/pool/main
-endif
-
-.PHONY: clean
-clean:
- @rm -rf tmp
- @rm -rf release
- @rm -f $(DEB_DEST)/$(DEB_NAME).deb
-
-.PHONY: validate-cache
-validate-cache:
- @echo "No cache validation schema available for $(shell pwd)"
- @echo "Continuing ..."
-
-.PHONY: release
-release:release/pool/main/$(DEB_NAME).deb
-ifneq ($(ODL_MAIN_REPO),)
- @cp release/pool/main/$(DEB_NAME).deb $(DEB_DEST)
- @cp -Rvp puppet/modules/* $(PUPPET_DEST)
-endif
diff --git a/fuel/build/opendaylight/f_odl/README b/fuel/build/opendaylight/f_odl/README
deleted file mode 100644
index 077962d..0000000
--- a/fuel/build/opendaylight/f_odl/README
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-This directory adds the OpenDaylight (odl) package and related puppet
-deployment manifest such that it gets built into the .iso image and deployed
-on the stack controller cluster.
-
-The f_odl directory has the following structure:
-.
-+--------+----------+-----------+------------+
- | | | |
- puppet/ Makefile README odl_<change_id>
- | (this file) /<version>
- | |
- | odl deb pkg src
- modules/
- |
- |
- |
- opnfv/
- |
- |
- |
- manifests/
- |
- |
- |
- odl.pp
-
-Makefile:
-Invoked by the git root Makefile, it builds the odl debian package from the
-debian pkg source directory (inside this directory) and pushes it together
-with the manifests to the fuel build source artifact directory, such that it
-eventually gets built into the new fuel .iso.
-
-odl.pp:
-Controls the installation and configuration of odl
-
-odl deb pkg src:
-Is the debian package source directory tree including all needed odl artifacts
-and debian pakage meta data. This debian source package tree is built from
-fuel-build/opendaylight and doesnt exist before it has bee built.
diff --git a/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp b/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp
deleted file mode 100644
index 6165646..0000000
--- a/fuel/build/opendaylight/f_odl/puppet/modules/opnfv/manifests/odl.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-class opnfv::odl {
- if $::osfamily == 'Debian' {
-
-
- case $::fuel_settings['role'] {
- /controller/: {
- package { 'odl':
- ensure => installed,
- }
- }
- }
- }
-}
diff --git a/fuel/build/opendaylight/f_odl/testing/README b/fuel/build/opendaylight/f_odl/testing/README
deleted file mode 100644
index 2ef4976..0000000
--- a/fuel/build/opendaylight/f_odl/testing/README
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-In order to test the functionality without performing a full deployment, run "puppet apply" on the
-fake_init.pp which will call only the opnfv::odl class.
diff --git a/fuel/build/opendaylight/f_odl/testing/fake_init.pp b/fuel/build/opendaylight/f_odl/testing/fake_init.pp
deleted file mode 100644
index 0600d2e..0000000
--- a/fuel/build/opendaylight/f_odl/testing/fake_init.pp
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-$fuel_settings = parseyaml($astute_settings_yaml)
-
-include opnfv::odl
diff --git a/fuel/build/opendaylight/make-odl-deb.sh b/fuel/build/opendaylight/make-odl-deb.sh
deleted file mode 100755
index 5222087..0000000
--- a/fuel/build/opendaylight/make-odl-deb.sh
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/bin/bash
-set -e
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-##############################################################################
-# Default variable declarations
-
-COMMAND=
-PACKAGE_NAME=
-PACKAGE_SHORT_NAME=
-PACKAGE_VERSION=
-TARGET_BUILD_PATH=
-DEPENDENCIES=
-MAINTAINER=
-ARCH="amd64"
-BUILD_HISTORY=".odl-build-history"
-
-##############################################################################
-# subroutine: usage
-# Description: Prints out usage of this script
-
-usage ()
-{
-cat <<EOF
-usage: $0 options
-
-$0 creates a ${PACKAGE_NAME} Debian package
-
-OPTIONS:
- -n Package short name
- -N Package name
- -v Version
- -t Tag
- -p Target build path, the path where the built tar ball is to be fetched
- -m Maintainer
- -d Package dependencies
- -h Prints this message
- -C Clean
-
-E.g.: $0 -n my/deb/src/dest/path -N my-package -v 1.0-1 -t myTag -p path/to/the/source -m "Main Tainer <main.tainer@example.org>" -d myJavaDependence
-EOF
-}
-
-##############################################################################
-# subroutine: clean
-# Description: Cleans up all artifacts from earlier builds
-
-clean ()
-{
-if [ -e $BUILD_HISTORY ]; then
- while read line
- do
- rm -rf $line
- done < $BUILD_HISTORY
- rm ${BUILD_HISTORY}
- exit 0
-fi
-}
-
-##############################################################################
-# make-DEBIAN_control
-# Description: constructs the Debian pack control file
-
-make-DEBIAN_control ()
-{
-cat <<EOF
-Package: $PACKAGE_SHORT_NAME
-Version: $PACKAGE_VERSION
-Section: base
-Priority: optional
-Architecture: $ARCH
-Depends: $DEPENDENCIES
-Maintainer: $MAINTAINER
-Description: OpenDaylight daemon
- This is a daemon for the opendaylight/odl controller service.
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_conffiles
-# Description: Constructs the Debian package config files assignment
-
-make-DEBIAN_conffiles ()
-{
-cat <<EOF
-/etc/odl/etc/all.policy
-/etc/odl/etc/config.properties
-/etc/odl/etc/custom.properties
-/etc/odl/etc/distribution.info
-/etc/odl/etc/equinox-debug.properties
-/etc/odl/etc/java.util.logging.properties
-/etc/odl/etc/jmx.acl.cfg
-/etc/odl/etc/jmx.acl.java.lang.Memory.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.bundle.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.config.cfg
-/etc/odl/etc/jmx.acl.org.apache.karaf.security.jmx.cfg
-/etc/odl/etc/jmx.acl.osgi.compendium.cm.cfg
-/etc/odl/etc/jre.properties
-/etc/odl/etc/keys.properties
-/etc/odl/etc/org.apache.felix.fileinstall-deploy.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.bundle.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.config.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.feature.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.jaas.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.kar.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.shell.cfg
-/etc/odl/etc/org.apache.karaf.command.acl.system.cfg
-/etc/odl/etc/org.apache.karaf.features.cfg
-/etc/odl/etc/org.apache.karaf.features.obr.cfg
-/etc/odl/etc/org.apache.karaf.features.repos.cfg
-/etc/odl/etc/org.apache.karaf.jaas.cfg
-/etc/odl/etc/org.apache.karaf.kar.cfg
-/etc/odl/etc/org.apache.karaf.log.cfg
-/etc/odl/etc/org.apache.karaf.management.cfg
-/etc/odl/etc/org.apache.karaf.shell.cfg
-/etc/odl/etc/org.ops4j.pax.logging.cfg
-/etc/odl/etc/org.ops4j.pax.url.mvn.cfg
-/etc/odl/etc/regions-config.xml
-/etc/odl/etc/shell.init.script
-/etc/odl/etc/startup.properties
-/etc/odl/etc/system.properties
-/etc/odl/etc/users.properties
-/etc/odl/configuration/context.xml
-/etc/odl/configuration/logback.xml
-/etc/odl/configuration/tomcat-logging.properties
-/etc/odl/configuration/tomcat-server.xml
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_postinst
-# Description: Constructs the Debian package post installation script
-
-make-DEBIAN_postinst ()
-{
-cat <<EOF
-#!/bin/bash -e
-ln -s /etc/${PACKAGE_SHORT_NAME}/* ${TARGET_INSTALL_PATH}
-echo "OpenDaylight $TAG version $PACKAGE_VERSION has been installed"
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_bin
-# Description: Constructs the bin script (normally under /usr/bin)
-
-make-DEBIAN_bin ()
-{
-cat <<EOF
-#!/bin/bash -e
-${TARGET_INSTALL_PATH}bin/karaf "\$@"
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_copyright
-# Description: Constructs the copyright text (normally under /usr/share/doc...)
-
-make-DEBIAN_copyright ()
-{
-cat <<EOF
-OpenDaylight - an open source SDN controller
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-EOF
-}
-
-##############################################################################
-# subroutine: make-DEBIAN_changelog
-# Description: Constructs the changelog text (normally under /usr/share/doc...)
-
-make-DEBIAN_changelog ()
-{
-cat <<EOF
-$PACKAGE_SHORT_NAME ($PACKAGE_VERSION) precise-proposed; urgency=low
-
- * Derived from $PACKAGE_NAME $PACKAGE_VERSION
-
- -- $MAINTAINER $(date)
-EOF
-}
-
-##############################################################################
-# MAIN
-
-while getopts "N:n:v:d:Chm:t:p:" OPTION
-do
- case $OPTION in
- h)
- usage
- exit 0
- ;;
-
- N)
- PACKAGE_NAME=$OPTARG
- COMMAND+="-N ${PACKAGE_NAME} "
- ;;
-
- n)
- PACKAGE_SHORT_NAME=$OPTARG
- COMMAND+="-n ${PACKAGE_SHORT_NAME} "
- ;;
-
- v)
- PACKAGE_VERSION=$OPTARG
- COMMAND+="-v ${PACKAGE_VERSION} "
- ;;
-
- p)
- TARGET_BUILD_PATH=$OPTARG
- COMMAND+="-p ${TARGET_BUILD_PATH} "
- ;;
-
- t)
- TAG=$OPTARG
- COMMAND+="-t ${TAG} "
- ;;
-
- m)
- MAINTAINER=$OPTARG
- COMMAND+="-m ${MAINTAINER} "
- ;;
-
- d)
- DEPENDENCIES=$OPTARG
- COMMAND+="-d ${DEPENDENCIES} "
- ;;
-
- A)
- ARCH=$OPTARG
- COMMAND+="-A ${ARCH} "
- ;;
-
- C)
- COMMAND+="-C "
- clean
- exit 0
- ;;
- esac
-done
-
-# Constructing script variables
-DEB_PACK_BASE_PATH="f_${PACKAGE_SHORT_NAME}/package/${PACKAGE_NAME}_${PACKAGE_VERSION}"
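-# record the package path so that a later clean run (-C) can remove it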
-echo ${DEB_PACK_BASE_PATH} >> "$BUILD_HISTORY"
-TARGET_INSTALL_PATH="/usr/share/java/${PACKAGE_SHORT_NAME}/"
-DEB_PACK_CONTENT_PATH="${DEB_PACK_BASE_PATH}/usr/share/java/${PACKAGE_SHORT_NAME}/"
-DEB_PACK_CONFIG_PATH="${DEB_PACK_BASE_PATH}/etc/${PACKAGE_SHORT_NAME}"
-TARGET_TAR=$(ls ${TARGET_BUILD_PATH}*.tar.gz)
-TARGET_TAR="${TARGET_TAR##*/}"
-TAR_PATH="${TARGET_TAR%.*}"
-TAR_PATH="${TAR_PATH%.*}"
-if [ -e $DEB_PACK_BASE_PATH ]; then
- rm -R $DEB_PACK_BASE_PATH
-fi
-
-# Create Deb pack content and configuration
-mkdir -p ${DEB_PACK_CONTENT_PATH}
-cp ${TARGET_BUILD_PATH}${TARGET_TAR} ${DEB_PACK_CONTENT_PATH}
-tar -xzf ${DEB_PACK_CONTENT_PATH}${TARGET_TAR} -C ${DEB_PACK_CONTENT_PATH}
-rm ${DEB_PACK_CONTENT_PATH}${TARGET_TAR}
-mv ${DEB_PACK_CONTENT_PATH}${TAR_PATH}/* ${DEB_PACK_CONTENT_PATH}.
-rm -R ${DEB_PACK_CONTENT_PATH}${TAR_PATH}
-
-# Create and populate Deb pack config target
-mkdir -p ${DEB_PACK_CONFIG_PATH}/etc
-mv ${DEB_PACK_CONTENT_PATH}etc/* ${DEB_PACK_CONFIG_PATH}/etc/
-rm -R ${DEB_PACK_CONTENT_PATH}etc
-mkdir -p ${DEB_PACK_CONFIG_PATH}/configuration
-mv ${DEB_PACK_CONTENT_PATH}configuration/* ${DEB_PACK_CONFIG_PATH}/configuration/
-rm -R ${DEB_PACK_CONTENT_PATH}configuration
-
-# Set package permissions
-find ${DEB_PACK_CONTENT_PATH} -type d -print -exec chmod 755 {} \;
-find ${DEB_PACK_CONFIG_PATH}/etc/ -type f -print -exec chmod 644 {} \;
-find ${DEB_PACK_CONFIG_PATH}/etc/ -type d -print -exec chmod 755 {} \;
-
-# Create package usr/bin odl script
-mkdir "${DEB_PACK_BASE_PATH}/usr/bin"
-chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin"
-make-DEBIAN_bin > "${DEB_PACK_BASE_PATH}/usr/bin/odl"
-chmod 755 "${DEB_PACK_BASE_PATH}/usr/bin/odl"
-
-# Create Deb pack install meta-data
-mkdir "${DEB_PACK_BASE_PATH}/DEBIAN"
-make-DEBIAN_control > "${DEB_PACK_BASE_PATH}/DEBIAN/control"
-make-DEBIAN_conffiles > "${DEB_PACK_BASE_PATH}/DEBIAN/conffiles"
-mkdir -p "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}"
-make-DEBIAN_copyright > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/copyright"
-make-DEBIAN_changelog > "${DEB_PACK_BASE_PATH}/usr/share/doc/${PACKAGE_SHORT_NAME}/changelog.Debian"
-
-# Create Deb pack post install symlinks and usr/bin scripts
-make-DEBIAN_postinst > "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
-chmod 755 "${DEB_PACK_BASE_PATH}/DEBIAN/postinst"
-mkdir -p "${DEB_PACK_BASE_PATH}/usr/bin"
diff --git a/fuel/build/opendaylight/odl_maven/settings.xml b/fuel/build/opendaylight/odl_maven/settings.xml
deleted file mode 100644
index 35a4442..0000000
--- a/fuel/build/opendaylight/odl_maven/settings.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
-
- <profiles>
- <profile>
- <id>opendaylight-release</id>
- <repositories>
- <repository>
- <releases>
- <enabled>true</enabled>
- <updatePolicy>never</updatePolicy>
- </releases>
- <snapshots>
- <enabled>false</enabled>
- </snapshots>
- <id>opendaylight-mirror</id>
- <name>opendaylight-mirror</name>
- <url>http://nexus.opendaylight.org/content/groups/public/</url>
- </repository>
- </repositories>
- </profile>
-
- <profile>
- <id>opendaylight-snapshots</id>
- <repositories>
- <repository>
- <releases>
- <enabled>false</enabled>
- </releases>
- <snapshots>
- <enabled>true</enabled>
- </snapshots>
- <id>opendaylight-snapshot</id>
- <name>opendaylight-snapshot</name>
- <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
- </repository>
- </repositories>
- </profile>
- </profiles>
-
- <activeProfiles>
- <activeProfile>opendaylight-release</activeProfile>
- <activeProfile>opendaylight-snapshots</activeProfile>
- </activeProfiles>
-</settings>
diff --git a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
index 9ff8017..d2a7841 100644
--- a/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
+++ b/fuel/prototypes/auto-deploy/examples/libvirt/conf/vms/fuel-master
@@ -98,6 +98,5 @@
<address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
</memballoon>
</devices>
- <seclabel type='dynamic' model='apparmor' relabel='yes'/>
</domain>
diff --git a/opensteak/ci/build.sh b/opensteak/ci/build.sh
index e69de29..7a85332 100644
--- a/opensteak/ci/build.sh
+++ b/opensteak/ci/build.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+#placeholder
diff --git a/opensteak/ci/deploy.sh b/opensteak/ci/deploy.sh
index e69de29..bd6ff86 100644
--- a/opensteak/ci/deploy.sh
+++ b/opensteak/ci/deploy.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
+# TODO: find a way to create the openvswitch bridge
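+# A minimal sketch for this TODO, assuming Open vSwitch is installed and the
+# bridge name matches config/infra.yaml:
+#   sudo ovs-vsctl --may-exist add-br br-libvirt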
+
+
+# This will create a Foreman Virtual Machine with KVM (libvirt)
+cd "${DIR}/../tools/"   # resolve relative to this script, not the caller's cwd
+sudo python3 create_foreman.py --config ../config/infra.yaml
+
+
diff --git a/opensteak/config/common.yaml b/opensteak/config/common.yaml
new file mode 100644
index 0000000..144e84f
--- /dev/null
+++ b/opensteak/config/common.yaml
@@ -0,0 +1,119 @@
+# common.yaml
+---
+
+###
+## OpenStack passwords
+###
+ceph_password: "password"
+admin_password: "password"
+mysql_service_password: "password"
+mysql_root_password: "password"
+rabbitmq_password: "password"
+glance_password: "password"
+nova_password: "password"
+neutron_shared_secret: "password"
+neutron_password: "password"
+cinder_password: "password"
+keystone_admin_token: "password"
+horizon_secret_key: "12345"
+
+domain: "infra.opensteak.fr"
+
+###
+## Class parameters
+###
+# Rabbit
+opensteak::rabbitmq::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+
+# MySQL
+opensteak::mysql::root_password: "%{hiera('mysql_root_password')}"
+opensteak::mysql::mysql_password: "%{hiera('mysql_service_password')}"
+
+# Key
+opensteak::key::password: "%{hiera('admin_password')}"
+opensteak::key::stack_domain: "%{hiera('domain')}"
+
+# Keystone
+opensteak::keystone::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::keystone::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::keystone::keystone_token: "%{hiera('keystone_admin_token')}"
+opensteak::keystone::stack_domain: "%{hiera('domain')}"
+opensteak::keystone::admin_mail: "admin@opensteak.fr"
+opensteak::keystone::admin_password: "%{hiera('admin_password')}"
+opensteak::keystone::glance_password: "%{hiera('glance_password')}"
+opensteak::keystone::nova_password: "%{hiera('nova_password')}"
+opensteak::keystone::neutron_password: "%{hiera('neutron_password')}"
+opensteak::keystone::cinder_password: "%{hiera('cinder_password')}"
+
+# Glance
+opensteak::glance::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::glance::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::glance::stack_domain: "%{hiera('domain')}"
+opensteak::glance::glance_password: "%{hiera('glance_password')}"
+
+# Nova
+opensteak::nova::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::nova::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::nova::stack_domain: "%{hiera('domain')}"
+opensteak::nova::nova_password: "%{hiera('nova_password')}"
+opensteak::nova::neutron_password: "%{hiera('neutron_password')}"
+opensteak::nova::neutron_shared: "%{hiera('neutron_shared_secret')}"
+
+# Cinder
+opensteak::cinder::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::cinder::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::cinder::stack_domain: "%{hiera('domain')}"
+opensteak::cinder::nova_password: "%{hiera('cinder_password')}"
+
+# Compute
+opensteak::nova-compute::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::nova-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::nova-compute::stack_domain: "%{hiera('domain')}"
+opensteak::nova-compute::neutron_password: "%{hiera('neutron_password')}"
+
+
+# Neutron controller
+opensteak::neutron-controller::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-controller::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-controller::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-controller::nova_password: "%{hiera('nova_password')}"
+opensteak::neutron-controller::neutron_password: "%{hiera('neutron_password')}"
+# Neutron compute
+opensteak::neutron-compute::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-compute::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-compute::neutron_password: "%{hiera('neutron_password')}"
+opensteak::neutron-compute::neutron_shared: "%{hiera('neutron_shared_secret')}"
+opensteak::neutron-compute::infra_nodes:
+ server186:
+ ip: 192.168.1.27
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server187:
+ ip: 192.168.1.155
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server188:
+ ip: 192.168.1.116
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+ server189:
+ ip: 192.168.1.117
+ bridge_uplinks:
+ - 'br-vm:p3p1'
+# Neutron network
+opensteak::neutron-network::mysql_password: "%{hiera('mysql_root_password')}"
+opensteak::neutron-network::rabbitmq_password: "%{hiera('rabbitmq_password')}"
+opensteak::neutron-network::stack_domain: "%{hiera('domain')}"
+opensteak::neutron-network::neutron_password: "%{hiera('neutron_password')}"
+opensteak::neutron-network::neutron_shared: "%{hiera('neutron_shared_secret')}"
+opensteak::neutron-network::infra_nodes:
+ server98:
+ ip: 192.168.1.58
+ bridge_uplinks:
+ - 'br-ex:em2'
+ - 'br-vm:em5'
+
+# Horizon
+opensteak::horizon::stack_domain: "%{hiera('domain')}"
+opensteak::horizon::secret_key: "%{hiera('horizon_secret_key')}"
diff --git a/opensteak/config/infra.yaml b/opensteak/config/infra.yaml
new file mode 100644
index 0000000..2ff02a1
--- /dev/null
+++ b/opensteak/config/infra.yaml
@@ -0,0 +1,81 @@
+domains: "infra.opensteak.fr"
+media: "Ubuntu mirror"
+environments: "production"
+operatingsystems: "Ubuntu14.04Cloud"
+subnets: "Admin"
+compute_profiles: "Test"
+smart_proxies: "foreman.infra.opensteak.fr"
+ptables: "Preseed default"
+architectures: "x86_64"
+
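+# note: "operatingsystems" and "subnets" each appear twice in this file
+# (scalar above, mapping below); a standard YAML loader keeps the last value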
+operatingsystems:
+ "Ubuntu 14.04.2 LTS":
+ name: "Ubuntu"
+ description: "Ubuntu 14.04.2 LTS"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+ "Ubuntu 14.04 Cloud":
+ name: "Ubuntu14.04Cloud"
+ description: "Ubuntu 14.04 Cloud"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+
+hostgroupTop:
+ name: 'test'
+ classes:
+ - "ntp"
+ subnet: "Admin"
+ params:
+ password: 'toto'
+hostgroups:
+ hostgroupController:
+ name: 'controller'
+ classes:
+ - "opensteak::base-network"
+ - "opensteak::libvirt"
+ params:
+ foreman_sshkey: 'xxxx'
+ hostgroupControllerVM:
+ name: 'controller_VM'
+ classes:
+ - "opensteak::apt"
+ params:
+ foreman_sshkey: 'xxxx'
+ password: 'toto'
+ hostgroupCompute:
+ name: 'compute'
+ classes:
+ - "opensteak::neutron-compute"
+ - "opensteak::nova-compute"
+subnets:
+ Admin:
+ shared: False
+ data:
+ network: "192.168.4.0"
+ mask: "255.255.255.0"
+ vlanid:
+ gateway: "192.168.4.1"
+ dns_primary: "192.168.1.4"
+ from: "192.168.4.10"
+ to: "192.168.4.200"
+ ipam: "DHCP"
+ boot_mode: "DHCP"
+
+foreman:
+ ip: "192.168.4.2"
+ admin: "admin"
+ password: "opnfv"
+ cpu: "4"
+ ram: "4194304"
+ iso: "trusty-server-cloudimg-amd64-disk1.img"
+ disksize: "5G"
+ force: True
+ dns: "8.8.8.8"
+ bridge: "br-libvirt"
+ bridge_type: "openvswitch"
diff --git a/opensteak/tools/README.rst b/opensteak/tools/README.rst
new file mode 100644
index 0000000..188addc
--- /dev/null
+++ b/opensteak/tools/README.rst
@@ -0,0 +1,52 @@
+:Authors: Arnaud Morin (arnaud1.morin@orange.com)
+:Version: 0.0.2
+
+=======================================================
+OPNFV Installation instructions using Foreman/OpenSteak
+=======================================================
+
+Abstract
+========
+
+This document describes how to set up OPNFV from a Foreman virtual machine on an Ubuntu server.
+
+License
+=======
+OPNFV Installation instructions using Foreman/OpenSteak (c) by Arnaud Morin (Orange)
+
+OPNFV Installation instructions using Foreman/OpenSteak are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date** | **Ver.** | **Author** | **Comment** |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-08 | 0.0.1 | Arnaud Morin | First draft |
+| | | (Orange) | |
++--------------------+--------------------+--------------------+--------------------+
+
+Table of contents
+===================
+
+.. contents::
+ :backlinks: none
+
+Introduction
+============
+
+This document describes how to set up OPNFV from a Foreman virtual machine on an Ubuntu server.
+Before starting, you should have an Ubuntu 14.04 LTS server already installed.
+
+Here is the manual workflow that you will have to perform:
+
+- Install
+- Manually prepare configuration files from templates.
+
+
+Here is the current workflow of the automated installation:
+
+- Dependencies installation (such as libvirt, ipmitool, etc.)
+- Foreman Virtual Machine creation
+- to be completed
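+
+As a sketch of the current entry point (paths assumed from this repository
+layout, see ci/deploy.sh)::
+
+    cd opensteak/tools
+    sudo python3 create_foreman.py --config ../config/infra.yaml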
diff --git a/opensteak/tools/config.yaml b/opensteak/tools/config.yaml
new file mode 100644
index 0000000..c618a52
--- /dev/null
+++ b/opensteak/tools/config.yaml
@@ -0,0 +1,78 @@
+domains: "test-infra.opensteak.fr"
+media: "Ubuntu mirror"
+environments: "production"
+operatingsystems: "Ubuntu14.04Cloud"
+subnets: "Admin"
+compute_profiles: "Test"
+smart_proxies: "foreman.infra.opensteak.fr"
+ptables: "Preseed default"
+architectures: "x86_64"
+
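+# note: as in config/infra.yaml, the duplicate "operatingsystems" and
+# "subnets" keys below take precedence under a standard YAML loader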
+operatingsystems:
+ "Ubuntu 14.04.1 LTS":
+ name: "Ubuntu"
+ description: "Ubuntu 14.04.1 LTS"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+ "Ubuntu 14.04 Cloud":
+ name: "Ubuntu14.04Cloud"
+ description: "Ubuntu 14.04 Cloud"
+ major: "14"
+ minor: "04"
+ family: "Debian"
+ release_name: "trusty"
+ password_hash: "MD5"
+
+hostgroupTop:
+ name: 'test'
+ classes:
+ - "ntp"
+ subnet: "Admin"
+ params:
+ password: 'toto'
+hostgroups:
+ hostgroupController:
+ name: 'controller'
+ classes:
+ - "opensteak::base-network"
+ - "opensteak::libvirt"
+ params:
+ foreman_sshkey: 'xxxx'
+ hostgroupControllerVM:
+ name: 'controller_VM'
+ classes:
+ - "opensteak::apt"
+ params:
+ foreman_sshkey: 'xxxx'
+ password: 'toto'
+ hostgroupCompute:
+ name: 'compute'
+ classes:
+ - "opensteak::neutron-compute"
+ - "opensteak::nova-compute"
+subnets:
+ Admin:
+ shared: False
+ data:
+ network: "172.16.0.0"
+ mask: "255.255.255.0"
+ vlanid:
+ gateway: "172.16.0.1"
+ dns_primary: "172.16.0.1"
+ from: "172.16.0.10"
+ to: "172.16.0.200"
+ ipam: "DHCP"
+ boot_mode: "DHCP"
+
+foreman:
+ ip: "172.16.0.2"
+ password: "opnfv"
+ cpu: "2"
+ ram: "2097152"
+ iso: "trusty-server-cloudimg-amd64-disk1.img"
+ disksize: "5G"
+ force: True
+ dns: "8.8.8.8"
diff --git a/opensteak/tools/create_foreman.py b/opensteak/tools/create_foreman.py
new file mode 100644
index 0000000..6cf4510
--- /dev/null
+++ b/opensteak/tools/create_foreman.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Create the Foreman Virtual Machine
+"""
+
+# TODO: be sure that we are running as root
+
+from opensteak.conf import OpenSteakConfig
+from opensteak.printer import OpenSteakPrinter
+# from opensteak.argparser import OpenSteakArgParser
+from opensteak.templateparser import OpenSteakTemplateParser
+from opensteak.virsh import OpenSteakVirsh
+from pprint import pprint as pp
+# from ipaddress import IPv4Address
+import argparse
+import tempfile
+import shutil
+import os
+# import sys
+
+p = OpenSteakPrinter()
+
+#
+# Check for params
+#
+p.header("Check parameters")
+args = {}
+
+# Update args with values from CLI
+parser = argparse.ArgumentParser(description='This script will create a foreman VM.', usage='%(prog)s [options]')
+parser.add_argument('-c', '--config', help='YAML config file to use (default is config/infra.yaml).', default='config/infra.yaml')
+args.update(vars(parser.parse_args()))
+
+# Open config file
+conf = OpenSteakConfig(config_file=args["config"])
+# pp(conf.dump())
+
+a = {}
+a["name"] = "foreman"
+a["ip"] = conf["foreman"]["ip"]
+a["netmask"] = conf["subnets"]["Admin"]["data"]["mask"]
+a["netmaskshort"] = sum([bin(int(x)).count('1')
+ for x in conf["subnets"]["Admin"]
+ ["data"]["mask"]
+ .split('.')])
+a["gateway"] = conf["subnets"]["Admin"]["data"]["gateway"]
+a["network"] = conf["subnets"]["Admin"]["data"]["network"]
+a["admin"] = conf["foreman"]["admin"]
+a["password"] = conf["foreman"]["password"]
+a["cpu"] = conf["foreman"]["cpu"]
+a["ram"] = conf["foreman"]["ram"]
+a["iso"] = conf["foreman"]["iso"]
+a["disksize"] = conf["foreman"]["disksize"]
+a["force"] = conf["foreman"]["force"]
+a["dhcprange"] = "{0} {1}".format(conf["subnets"]["Admin"]
+ ["data"]["from"],
+ conf["subnets"]["Admin"]
+ ["data"]["to"])
+a["domain"] = conf["domains"]
+reverse_octets = str(conf["foreman"]["ip"]).split('.')[-2::-1]
+a["reversedns"] = '.'.join(reverse_octets) + '.in-addr.arpa'
+a["dns"] = conf["foreman"]["dns"]
+a["bridge"] = conf["foreman"]["bridge"]
+if conf["foreman"]["bridge_type"] == "openvswitch":
+ a["bridgeconfig"] = "<virtualport type='openvswitch'></virtualport>"
+else:
+ # no specific config for linuxbridge
+ a["bridgeconfig"] = ""
+
+# Update args with values from config file
+args.update(a)
+del a
+
+p.list_id(args)
+
+# Ask confirmation
+if args["force"] is not True:
+ p.ask_validation()
+
+# Create the VM
+p.header("Initiate configuration")
+
+###
+# Work on templates
+###
+# Create temporary folders and files
+tempFolder = tempfile.mkdtemp(dir="/tmp")
+tempFiles = {}
+
+for f in os.listdir("templates_foreman/"):
+ tempFiles[f] = "{0}/{1}".format(tempFolder, f)
+ try:
+ OpenSteakTemplateParser("templates_foreman/{0}".format(f),
+ tempFiles[f], args)
+ except Exception as err:
+ p.status(False, msg=("Something went wrong when trying to create "
+ "the file {0} from the template "
+ "templates_foreman/{1}").format(tempFiles[f], f),
+ failed="{0}".format(err))
+
+###
+# Work on files
+###
+for f in os.listdir("files_foreman/"):
+ tempFiles[f] = "{0}/{1}".format(tempFolder, f)
+ shutil.copyfile("files_foreman/{0}".format(f), tempFiles[f])
+
+p.status(True, msg="Temporary files created:")
+p.list_id(tempFiles)
+
+
+###
+# Delete if already exists
+###
+
+# Get all volumes and VM
+p.header("Virsh calls")
+OpenSteakVirsh = OpenSteakVirsh()
+volumeList = OpenSteakVirsh.volumeList()
+domainList = OpenSteakVirsh.domainList()
+# p.list_id(volumeList)
+# p.list_id(domainList)
+
+# TODO: check that the default image is in the list
+# (trusty-server-cloudimg-amd64-disk1.img by default)
+
+# Delete the volume if exists
+try:
+ oldVolume = volumeList[args["name"]]
+
+ # Ask confirmation
+ if args["force"] is not True:
+ p.ask_validation()
+
+ status = OpenSteakVirsh.volumeDelete(volumeList[args["name"]])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+except KeyError:
+ # no old volume, do nothing
+ pass
+
+# Delete the VM if exists
+try:
+ vmStatus = domainList[args["name"]]
+
+ # Ask confirmation
+ if args["force"] is not True:
+ p.ask_validation()
+
+ # Destroy (stop)
+ if vmStatus == "running":
+ status = OpenSteakVirsh.domainDestroy(args["name"])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+
+ # Undefine (delete)
+ status = OpenSteakVirsh.domainUndefine(args["name"])
+ if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+ p.status(True, msg=status["stdout"])
+except KeyError:
+ # no old VM defined, do nothing
+ pass
+
+###
+# Create the configuration image file from metadata and userdata
+###
+status = OpenSteakVirsh.generateConfiguration(args["name"], tempFiles)
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=("Configuration generated successfully in "
+ "/var/lib/libvirt/images/{0}-configuration.iso")
+ .format(args["name"]))
+
+# Refresh the pool
+status = OpenSteakVirsh.poolRefresh()
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+###
+# Create the new VM
+###
+# Create the volume from a clone
+status = OpenSteakVirsh.volumeClone(args["iso"], args["name"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+# Resize the volume
+status = OpenSteakVirsh.volumeResize(args["name"], args["disksize"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+# Create the VM
+status = OpenSteakVirsh.domainDefine(tempFiles["kvm-config"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+
+###
+# Start the VM
+###
+status = OpenSteakVirsh.domainStart(args["name"])
+if (status["stderr"]):
+ p.status(False, msg=status["stderr"])
+p.status(True, msg=status["stdout"])
+
+p.status(True, msg="Log file is at: /var/log/libvirt/qemu/{0}-serial.log"
+ .format(args["name"]))
+
+p.header("fini")
+
+# Delete temporary dir
+shutil.rmtree(tempFolder)
diff --git a/opensteak/tools/files_foreman/id_rsa b/opensteak/tools/files_foreman/id_rsa
new file mode 100644
index 0000000..d53ba88
--- /dev/null
+++ b/opensteak/tools/files_foreman/id_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAz0jMplucYXoe0xJ21ASL98PGbwZYCI5Xr4/kHXOdGvHvZr3z
+58tWU1Ta4qMf0qa272VsdQiO1pCmSlqrDW5C9rEeqLhhRX/yLbgv35mOdjRoIIAX
+6RfNniT/xXrfvPZYdw603fIbbw5igTRwc6W5QvJHRcKRKb762Vw2gPSS0GgFBLCk
+vC2kQbW4cfP+9elo86FAhNBs2TbBHLc9H2W+9KzYfgsigjJLsgRXL6/uhu3+sL2d
+3F1J9Nhyy3aoUOVxD2YPJlJvzYhLZcSXgXI+Oi0gZmhh3uImc4WRyOihK5jRpJaw
+desygyXo4lVskzxBjm7L9ynbCNMOO85ZVVJGxQIDAQABAoIBAQCaOWcSy4yRtiPj
+FZTV8MAXS1GD36t2SjoRhLTL+O5GUwW1YtVrfA2xmKv2/jm6KJJpkgPdG83y9NLU
+9ZrZNlWaaHQQQocVB7ovrB/qdLzbU+i5bbTcl/pDlPG8g8yeMoflpUqK7AzfV0uR
+KGwWj5JErjC7RaVt8wt+164xykbFyZeUu9htNthFD/OPaIPqgv6AoJdEULyGrTbd
+SRyJ01n0beGkB0o+0dnOEO34K+pU0Zzk+rAcOEl3UNkpxOzedEFOR6NdnX1eH4t4
+a6OZgskcVjyxFQPAyhcSkQ2iWncQx2ritTclst4NFjBae5hwYgEB4S9ZN5IOueMH
+eYhxYthNAoGBAPXtSDmRGPc4EHDBrbgDn4vhxK7QN35bWFW1KvHLD0hBBJO57GqT
+jGCJsbkw6peERuFV8qq+Bvz0nvlKl9humB1djlndUETksUTrNz73XxpJJ8L5parF
+okx0QLMXONOP5b6yGWYay3QD0gNz/HYVf//oDTdWRhbq5EY6VarOagfjAoGBANfG
+UrlxEYHwq3TE7unvgaao5Vpmw8Hqir2bnl2zKmPoV8ds/V+paMnV6Hhzgzu3bKgF
+ukZgAizEcfvxrxnfIraRJTI5xgBoIl8gdbsWkLre4qKpVSAkw4JLyzVVlXCyKYHp
+ocjeNVbO5Z2Yft0cv30LfeX+DEDeQS12RHLu/Sc3AoGBAMns2ZfC5p/encknje8A
+spjVeHwdJOOQNxiwl6FPHK40DIELcO4VVnbRuGaZnpVoHBbbTlQZkX1TkdCZCdLB
+BA9giQiKamUW7eLry0HdNW5M0OQLvZZZjih+b71c/ODhTz/j1mz65UDN/jutmYaP
+orjJnUhpg0U/+s0bCsojj/YHAoGBAKtsMhiFjaUv8OdJ9Y0A7H3dPKk/b1JF5YeR
+dJV4W7sXwXT8T6eKTWfce14GV0JADSDHvB9g8xlh0DSa48OoFEn6shRe9cEo+fWd
+Mis6WC0+Gcukv65TxsdjM8PhhGIOCQ/e7ttIPhQDN0Sm/FLqHe9YC+OGm3GFoT5e
+8S5mU9StAoGABFwqkFELU84twzKYJCVPZPktwtfrD0Hkbd9pk0ebuSnQ3bATFIyU
+CDspTADbY2IgC53u+XAhTd5BOsicTtMM9x1p5EOglbK1ANagWuGlzVfdbp+bmql9
+S8AaH22lha5vCfHHfAN2NSkQ+ABZnNpP66nFx06VcyEYkhuZgd6s5A0=
+-----END RSA PRIVATE KEY-----
diff --git a/opensteak/tools/files_foreman/id_rsa.pub b/opensteak/tools/files_foreman/id_rsa.pub
new file mode 100644
index 0000000..8b4c6a1
--- /dev/null
+++ b/opensteak/tools/files_foreman/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPSMymW5xheh7TEnbUBIv3w8ZvBlgIjlevj+Qdc50a8e9mvfPny1ZTVNriox/SprbvZWx1CI7WkKZKWqsNbkL2sR6ouGFFf/ItuC/fmY52NGgggBfpF82eJP/Fet+89lh3DrTd8htvDmKBNHBzpblC8kdFwpEpvvrZXDaA9JLQaAUEsKS8LaRBtbhx8/716WjzoUCE0GzZNsEctz0fZb70rNh+CyKCMkuyBFcvr+6G7f6wvZ3cXUn02HLLdqhQ5XEPZg8mUm/NiEtlxJeBcj46LSBmaGHe4iZzhZHI6KErmNGklrB16zKDJejiVWyTPEGObsv3KdsI0w47zllVUkbF arnaud@l-bibicy
diff --git a/opensteak/tools/opensteak/.gitignore b/opensteak/tools/opensteak/.gitignore
new file mode 100644
index 0000000..a65d046
--- /dev/null
+++ b/opensteak/tools/opensteak/.gitignore
@@ -0,0 +1,58 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/opensteak/tools/opensteak/__init__.py b/opensteak/tools/opensteak/__init__.py
new file mode 100644
index 0000000..01f9c9a
--- /dev/null
+++ b/opensteak/tools/opensteak/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+# This directory is a Python package.
diff --git a/opensteak/tools/opensteak/argparser.py b/opensteak/tools/opensteak/argparser.py
new file mode 100644
index 0000000..de980b6
--- /dev/null
+++ b/opensteak/tools/opensteak/argparser.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+# @author: Pawel Chomicki <pawel.chomicki@nokia.com>
+
+"""
+Parse arguments from CLI
+"""
+
+import argparse
+
+class OpenSteakArgParser:
+
+ def __init__(self):
+ """
+ Parse the command line
+ """
+ self.parser = argparse.ArgumentParser(description='This script will create config files for a VM in the current folder.', usage='%(prog)s [options] name')
+ self.parser.add_argument('name', help='Set the name of the machine')
+ self.parser.add_argument('-i', '--ip', help='Set the ip address of the machine. (Default is 192.168.42.42)', default='192.168.42.42')
+ self.parser.add_argument('-n', '--netmask', help='Set the netmask in short format. (Default is 24)', default='24')
+ self.parser.add_argument('-g', '--gateway', help='Set the gateway to ping internet. (Default is 192.168.42.1)', default='192.168.42.1')
+ self.parser.add_argument('-p', '--password', help='Set the ssh password. Login is ubuntu. (Default password is moutarde)', default='moutarde')
+ self.parser.add_argument('-u', '--cpu', help='Set number of CPU for the VM. (Default is 2)', default='2')
+ self.parser.add_argument('-r', '--ram', help='Set quantity of RAM for the VM in kB. (Default is 2097152)', default='2097152')
+ self.parser.add_argument('-o', '--iso', help='Use this iso file. (Default is trusty-server-cloudimg-amd64-disk1.img)', default='trusty-server-cloudimg-amd64-disk1.img')
+ self.parser.add_argument('-d', '--disksize', help='Create a disk with that size. (Default is 5G)', default='5G')
+ self.parser.add_argument('-f', '--force', help='Force creation without asking questions. This is dangerous as it will delete old VM with same name.', default=False, action='store_true')
+
+ def parse(self):
+ return self.parser.parse_args()
+
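+
+# Usage sketch:
+#   args = OpenSteakArgParser().parse()
+#   print(args.name, args.ip, args.netmask)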
diff --git a/opensteak/tools/opensteak/conf.py b/opensteak/tools/opensteak/conf.py
new file mode 100644
index 0000000..65eaf43
--- /dev/null
+++ b/opensteak/tools/opensteak/conf.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from yaml import load, dump
+try:
+ from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+ from yaml import Loader, Dumper
+
+
+class OpenSteakConfig:
+ """OpenSteak config class
+ Use this object as a dict
+ """
+
+ def __init__(self,
+ config_file="/usr/local/opensteak/infra/config/common.yaml",
+ autosave=False):
+ """ Function __init__
+ Load saved opensteak config.
+
+ @param PARAM: DESCRIPTION
+ @return RETURN: DESCRIPTION
+ @param config_file: the yaml config file to read.
+ default is '/usr/local/opensteak/infra/config/common.yaml'
+ @param autosave: save automaticly the config at destroy
+ default is False
+ """
+ self.config_file = config_file
+ self.autosave = autosave
+ with open(self.config_file, 'r') as stream:
+ self._data = load(stream, Loader=Loader)
+
+ def __getitem__(self, index):
+ """Get an item of the configuration"""
+ return self._data[index]
+
+ def __setitem__(self, index, value):
+ """Set an item of the configuration"""
+ self._data[index] = value
+
+ def list(self):
+ """Set an item of the configuration"""
+ return self._data.keys()
+
+ def dump(self):
+ """Dump the configuration"""
+ return dump(self._data, Dumper=Dumper)
+
+ def save(self):
+ """Save the configuration to the file"""
+ with open(self.config_file, 'w') as f:
+ f.write(dump(self._data, Dumper=Dumper))
+
+ def __del__(self):
+ if self.autosave:
+ self.save()
diff --git a/opensteak/tools/opensteak/foreman.py b/opensteak/tools/opensteak/foreman.py
new file mode 100644
index 0000000..b7cbf42
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.api import Api
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.domains import Domains
+from opensteak.foreman_objects.smart_proxies import SmartProxies
+from opensteak.foreman_objects.operatingsystems import OperatingSystems
+from opensteak.foreman_objects.hostgroups import HostGroups
+from opensteak.foreman_objects.hosts import Hosts
+from opensteak.foreman_objects.architectures import Architectures
+from opensteak.foreman_objects.subnets import Subnets
+from opensteak.foreman_objects.puppetClasses import PuppetClasses
+from opensteak.foreman_objects.compute_resources import ComputeResources
+
+
+class OpenSteakForeman:
+ """
+ OpenSteakForeman class, entry point to the foreman API objects
+ """
+ def __init__(self, password, login='admin', ip='127.0.0.1'):
+ """ Function __init__
+ Init the API with the connection params
+ @param password: authentication password
+ @param login: authentication login - default is admin
+ @param ip: api ip - default is localhost
+ @return RETURN: self
+ """
+ self.api = Api(login=login, password=password, ip=ip,
+ printErrors=False)
+ self.domains = Domains(self.api)
+ self.smartProxies = SmartProxies(self.api)
+ self.puppetClasses = PuppetClasses(self.api)
+ self.operatingSystems = OperatingSystems(self.api)
+ self.architectures = Architectures(self.api)
+ self.subnets = Subnets(self.api)
+ self.hostgroups = HostGroups(self.api)
+ self.hosts = Hosts(self.api)
+ self.computeResources = ComputeResources(self.api)
+ self.environments = ForemanObjects(self.api,
+ 'environments',
+ 'environment')
+ self.smartClassParameters = ForemanObjects(self.api,
+ 'smart_class_parameters',
+ 'smart_class_parameter')
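+
+# Usage sketch (ip/credentials are examples, matching config/infra.yaml):
+#   foreman = OpenSteakForeman(password='opnfv', login='admin', ip='192.168.4.2')
+#   foreman.hosts.list()    # -> list of ItemHost objects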
diff --git a/opensteak/tools/opensteak/foreman_objects/.gitignore b/opensteak/tools/opensteak/foreman_objects/.gitignore
new file mode 100644
index 0000000..a65d046
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/.gitignore
@@ -0,0 +1,58 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
diff --git a/opensteak/tools/opensteak/foreman_objects/__init__.py b/opensteak/tools/opensteak/foreman_objects/__init__.py
new file mode 100644
index 0000000..01f9c9a
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+# This directory is a Python package.
diff --git a/opensteak/tools/opensteak/foreman_objects/api.py b/opensteak/tools/opensteak/foreman_objects/api.py
new file mode 100644
index 0000000..dc99734
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/api.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import json
+import requests
+from requests_futures.sessions import FuturesSession
+from pprint import pformat
+
+
+class Api:
+ """
+ Api class
+ Class to deal with the foreman API v2
+ """
+ def __init__(self, password, login='admin', ip='127.0.0.1', printErrors=False):
+ """ Function __init__
+ Init the API with the connection params
+
+ @param password: authentication password
+ @param login: authentication login - default is admin
+ @param ip: api ip - default is localhost
+ @return RETURN: self
+ """
+ self.base_url = 'http://{}/api/v2/'.format(ip)
+ self.headers = {'Accept': 'version=2',
+ 'Content-Type': 'application/json; charset=UTF-8'}
+ self.auth = (login, password)
+ self.errorMsg = ''
+ self.printErrors = printErrors
+
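+ # Usage sketch (values are examples; requires a reachable foreman API):
+ #   api = Api(password='opnfv', login='admin', ip='192.168.4.2')
+ #   api.get_id_by_name('hosts', 'compute1')
+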
+ def list(self, obj, filter=False, only_id=False, limit=20):
+ """ Function list
+ Get the list of an object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param filter: filter for objects
+ @param only_id: boolean to only return dict with name/id
+ @return RETURN: the list of the object
+ """
+ self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit)
+ if filter:
+ self.url += '&search={}'.format(filter)
+ self.resp = requests.get(url=self.url, auth=self.auth,
+ headers=self.headers)
+ if only_id:
+ if self.__process_resp__(obj) is False:
+ return False
+ if type(self.res['results']) is list:
+ return dict((x['name'], x['id']) for x in self.res['results'])
+ elif type(self.res['results']) is dict:
+ r = {}
+ for v in self.res['results'].values():
+ for vv in v:
+ r[vv['name']] = vv['id']
+ return r
+ else:
+ return False
+ else:
+ return self.__process_resp__(obj)
+
+ def get(self, obj, id, sub_object=None):
+ """ Function get
+ Get an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @return RETURN: the targeted object
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ if sub_object:
+ self.url += '/' + sub_object
+ self.resp = requests.get(url=self.url, auth=self.auth,
+ headers=self.headers)
+ if self.__process_resp__(obj):
+ return self.res
+ return False
+
+ def get_id_by_name(self, obj, name):
+ """ Function get_id_by_name
+ Get the id of an object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param name: the name of the object
+ @return RETURN: the id of the object, or False if not found
+ """
+ list = self.list(obj, filter='name = "{}"'.format(name),
+ only_id=True, limit=1)
+ return list[name] if name in list.keys() else False
+
+ def set(self, obj, id, payload, action='', use_async=False):
+ """ Function set
+ Set an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @param action: specific action of an object ('power'...)
+ @param payload: the dict of the payload
+ @param use_async: should this request be async ('async' is a
+ reserved word in recent Python), if true use
+ return.result() to get the response
+ @return RETURN: the server response
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ if action:
+ self.url += '/{}'.format(action)
+ self.payload = json.dumps(payload)
+ if use_async:
+ session = FuturesSession()
+ return session.put(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ else:
+ self.resp = requests.put(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ if self.__process_resp__(obj):
+ return self.res
+ return False
+
+ def create(self, obj, payload, use_async=False):
+ """ Function create
+ Create an new object
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param payload: the dict of the payload
+ @param use_async: should this request be async, if true use
+ return.result() to get the response
+ @return RETURN: the server response
+ """
+ self.url = self.base_url + obj
+ self.payload = json.dumps(payload)
+ if use_async:
+ session = FuturesSession()
+ return session.post(url=self.url, auth=self.auth,
+ headers=self.headers, data=self.payload)
+ else:
+ self.resp = requests.post(url=self.url, auth=self.auth,
+ headers=self.headers,
+ data=self.payload)
+ return self.__process_resp__(obj)
+
+ def delete(self, obj, id):
+ """ Function delete
+ Delete an object by id
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @param id: the id of the object (name or id)
+ @return RETURN: the server response
+ """
+ self.url = '{}{}/{}'.format(self.base_url, obj, id)
+ self.resp = requests.delete(url=self.url,
+ auth=self.auth,
+ headers=self.headers, )
+ return self.__process_resp__(obj)
+
+ def __process_resp__(self, obj):
+ """ Function __process_resp__
+ Process the response sent by the server and store the result
+
+ @param obj: object name ('hosts', 'puppetclasses'...)
+ @return RETURN: the server response
+ """
+ self.last_obj = obj
+ if self.resp.status_code > 299:
+ self.errorMsg = ">> Error {} for object '{}'".format(self.resp.status_code,
+ self.last_obj)
+ try:
+ self.ret = json.loads(self.resp.text)
+ self.errorMsg += pformat(self.ret[list(self.ret.keys())[0]])
+ except Exception:
+ self.ret = self.resp.text
+ self.errorMsg += self.ret
+ if self.printErrors:
+ print(self.errorMsg)
+ return False
+ self.res = json.loads(self.resp.text)
+ if 'results' in self.res.keys():
+ return self.res['results']
+ return self.res
+
+ def __str__(self):
+ ret = pformat(self.base_url) + "\n"
+ ret += pformat(self.headers) + "\n"
+ ret += pformat(self.auth) + "\n"
+ return ret
diff --git a/opensteak/tools/opensteak/foreman_objects/architectures.py b/opensteak/tools/opensteak/foreman_objects/architectures.py
new file mode 100644
index 0000000..5e4303e
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/architectures.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Architectures(ForemanObjects):
+ """
+ Architectures class
+ """
+ objName = 'architectures'
+ payloadObj = 'architecture'
+
+ def checkAndCreate(self, key, payload, osIds):
+ """ Function checkAndCreate
+ Check if an architecture exists and create it if not
+
+ @param key: The targeted architectures
+ @param payload: The targeted architectures description
+ @param osIds: The list of os ids linked with this architecture
+ @return RETURN: The id of the object
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+ #~ To be sure the OS list is good, we ensure our os are in the list
+ for os in self[key]['operatingsystems']:
+ osIds.add(os['id'])
+ self[key]["operatingsystem_ids"] = list(osIds)
+ if len(self[key]['operatingsystems']) != len(osIds):
+ return False
+ return oid
diff --git a/opensteak/tools/opensteak/foreman_objects/compute_resources.py b/opensteak/tools/opensteak/foreman_objects/compute_resources.py
new file mode 100644
index 0000000..9ada9c4
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/compute_resources.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ComputeResources(ForemanObjects):
+ """
+ ComputeResources class
+ """
+ objName = 'compute_resources'
+ payloadObj = 'compute_resource'
+
+ def list(self):
+ """ Function list
+ List the compute resources
+
+ @return RETURN: List of ForemanItem objects
+ """
+ return list(map(lambda x: ForemanItem(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+ Get a compute resource
+
+ @param key: The compute resource name or ID
+ @return RETURN: The ForemanItem object of a compute resource
+ """
+ # compute_resources does not support get by name, so resolve the id first
+ if type(key) is not int:
+ key = self.getId(key)
+ ret = self.api.get(self.objName, key)
+ return ForemanItem(self.api, key, ret)
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+ Delete a compute resource
+
+ @param key: The compute resource name or ID
+ @return RETURN: The API result
+ """
+ # compute_resources does not support delete by name, so resolve the id first
+ if type(key) is not int:
+ key = self.getId(key)
+ return self.api.delete(self.objName, key)
diff --git a/opensteak/tools/opensteak/foreman_objects/domains.py b/opensteak/tools/opensteak/foreman_objects/domains.py
new file mode 100644
index 0000000..753833f
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/domains.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Domains(ForemanObjects):
+ """
+ Domain class
+ """
+ objName = 'domains'
+ payloadObj = 'domain'
+
+ def load(self, id='0', name=''):
+ """ Function load
+ To be rewritten
+
+ @param id: The Domain ID
+ @return RETURN: DESCRIPTION
+ """
+
+ if name:
+ id = self.__getIdByName__(name)
+ self.data = self.api.get('domains', id)  # assumption: the ForemanObjects api handle (self.foreman is undefined)
+ if 'parameters' in self.data:
+ self.params = self.data['parameters']
+ else:
+ self.params = []
+ self.name = self.data['name']
diff --git a/opensteak/tools/opensteak/foreman_objects/freeip.py b/opensteak/tools/opensteak/foreman_objects/freeip.py
new file mode 100644
index 0000000..86c003f
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/freeip.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+#~ from foreman.api import Api
+import requests
+from bs4 import BeautifulSoup
+import sys
+import json
+
+class FreeIP:
+ """ FreeIP return an available IP in the targeted network """
+
+ def __init__(self, login, password):
+ """ Init: get authenticity token """
+ with requests.session() as self.session:
+ try:
+ #~ 1/ Get login token and authenticate
+ payload = {}
+ log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/users/login', verify=False).text)
+ payload['utf8'] = log_soup.findAll('input',attrs={'name':'utf8'})[0].get('value')
+ payload['authenticity_token'] = log_soup.findAll('input',attrs={'name':'authenticity_token'})[0].get('value')
+ if payload['authenticity_token'] is None:
+ raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+ payload['commit']='Login'
+ payload['login[login]'] = login
+ payload['login[password]'] = password
+ #~ 2/ Log in
+ r = self.session.post('https://127.0.0.1/users/login', verify=False, data=payload)
+ if r.status_code != 200:
+ raise requests.exceptions.RequestException("Bad login or password")
+ #~ Get token for host creation
+ log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/hosts/new', verify=False).text)
+ self.authenticity_token = log_soup.findAll('input',attrs={'name':'authenticity_token'})[0].get('value')
+ if self.authenticity_token is None:  # check the token just fetched above
+ raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+ except requests.exceptions.RequestException as e:
+ print("Error connection Foreman to get a free ip")
+ print(e)
+ sys.exit(1)
+
+ def get(self, subnet, mac = ""):
+ payload = {"host_mac": mac, "subnet_id": subnet}
+ payload['authenticity_token'] = self.authenticity_token
+ try:
+ self.last_ip = json.loads(self.session.post('https://127.0.0.1/subnets/freeip', verify=False, data=payload).text)['ip']
+ if self.last_ip is None:  # assumption: a missing ip means the request failed
+ raise requests.exceptions.RequestException("Error getting free IP")
+ except requests.exceptions.RequestException as e:
+ print("Error connection Foreman to get a free ip")
+ print(e)
+ sys.exit(1)
+ return self.last_ip
+
+
+
+if __name__ == "__main__":
+ import pprint
+ import sys
+ if len(sys.argv) == 4:
+ f = FreeIP(sys.argv[1], sys.argv[2])
+ print(f.get(sys.argv[3]))
+ else:
+ print('Error: Usage\npython {} foreman_user foreman_password subnet'.format(sys.argv[0]))
diff --git a/opensteak/tools/opensteak/foreman_objects/hostgroups.py b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
new file mode 100644
index 0000000..55b8ba6
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.itemHostsGroup import ItemHostsGroup
+from pprint import pprint as pp
+
+
+class HostGroups(ForemanObjects):
+ """
+ HostGroups class
+ """
+ objName = 'hostgroups'
+ payloadObj = 'hostgroup'
+
+ def list(self):
+ """ Function list
+ list the hostgroups
+
+ @return RETURN: List of ItemHostsGroup objects
+ """
+ return list(map(lambda x: ItemHostsGroup(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+ Get a hostgroup
+
+ @param key: The hostgroup name or ID
+ @return RETURN: The ItemHostsGroup object of a hostgroup
+ """
+ # Because hostgroups do not support get by name, we do it by id
+ if type(key) is not int:
+ key = self.getId(key)
+ ret = self.api.get(self.objName, key)
+ return ItemHostsGroup(self.api, key, ret)
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+ Delete a hostgroup
+
+ @param key: The hostgroup name or ID
+ @return RETURN: The API result
+ """
+ # Because hostgroups do not support delete by name, we do it by id
+ if type(key) is not int:
+ key = self.getId(key)
+ return self.api.delete(self.objName, key)
+
+ def checkAndCreate(self, key, payload,
+ hostgroupConf,
+ hostgroupParent,
+ puppetClassesId):
+ """ Function checkAndCreate
+ checkAndCreate procedure for a hostgroup
+ - check that the hostgroup does not already exist
+ - create the hostgroup
+ - Add puppet classes from puppetClassesId
+ - Add params from hostgroupConf
+
+ @param key: The hostgroup name or ID
+ @param payload: The description of the hostgroup
+ @param hostgroupConf: The configuration of the host group from the
+ foreman.conf
+ @param hostgroupParent: The id of the parent hostgroup
+ @param puppetClassesId: The dict of puppet classes ids in foreman
+ @return RETURN: The hostgroup id, or False on failure
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+
+ # Create Hostgroup classes
+ hostgroupClassIds = self[key]['puppetclass_ids']
+ if 'classes' in hostgroupConf.keys():
+ if not self[key].checkAndCreateClasses(puppetClassesId.values()):
+ print("Failed in classes")
+ return False
+
+ # Set params
+ if 'params' in hostgroupConf.keys():
+ if not self[key].checkAndCreateParams(hostgroupConf['params']):
+ print("Failed in params")
+ return False
+
+ return oid
diff --git a/opensteak/tools/opensteak/foreman_objects/hosts.py b/opensteak/tools/opensteak/foreman_objects/hosts.py
new file mode 100644
index 0000000..95d47af
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/hosts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.itemHost import ItemHost
+from opensteak.printer import OpenSteakPrinter
+import time
+
+
+class Hosts(ForemanObjects):
+ """
+ Hosts class
+ """
+ objName = 'hosts'
+ payloadObj = 'host'
+
+ def list(self):
+ """ Function list
+ list the hosts
+
+ @return RETURN: List of ItemHost objects
+ """
+ return list(map(lambda x: ItemHost(self.api, x['id'], x),
+ self.api.list(self.objName)))
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+ Get an host
+
+ @param key: The host name or ID
+ @return RETURN: The ItemHost object of an host
+ """
+ return ItemHost(self.api, key, self.api.get(self.objName, key))
+
+ def __printProgression__(self, status, msg, eol='\n', failed=False):
+ """ Function __printProgression__
+ Print the creation progression or not
+ It delegates to the opensteak.printer lib
+
+ @param status: Status of the message
+ @param msg: Message
+ @param eol: End Of Line (to get a new line or not)
+ @param failed: Failure details to display, if any
+ @return RETURN: None
+ """
+ # assumed fix: delegate to OpenSteakPrinter.status (as create_foreman.py
+ # does) rather than calling this method itself, which recursed forever
+ if self.printHostProgress:
+ OpenSteakPrinter().status(status, msg, eol=eol, failed=failed)
+
+ def createVM(self, key, attributes, printHostProgress=False):
+ """ Function createVM
+ Create a Virtual Machine
+
+ The creation of a VM with libvirt is a bit complex.
+ We first create the element in foreman, then ask it to start
+ before the creation has returned its result.
+ To do so, we make async calls to the API and check the results.
+
+ @param key: The host name or ID
+ @param attributes: The payload of the host creation
+ @param printHostProgress: The link to opensteak.printerlib
+ to print or not the
+ progression of the host creation
+ @return RETURN: The API result
+ """
+
+ self.printHostProgress = printHostProgress
+ self.use_async = True  # 'async' is a reserved word in Python 3.7+
+ # Create the VM in foreman
+ self.__printProgression__('In progress',
+ key + ' creation: push in Foreman', eol='\r')
+ future1 = self.api.create('hosts', attributes, use_async=True)
+
+ # Wait before asking to power on the VM
+ sleep = 5
+ for i in range(0, sleep):
+ time.sleep(1)
+ self.__printProgression__('In progress',
+ key + ' creation: start in {0}s'
+ .format(sleep - i),
+ eol='\r')
+
+ # Power on the VM
+ self.__printProgression__('In progress',
+ key + ' creation: starting', eol='\r')
+ future2 = self[key].powerOn()
+
+ # Show Power on result
+ if future2.result().status_code == 200:
+ self.__printProgression__('In progress',
+ key + ' creation: wait for end of boot',
+ eol='\r')
+ else:
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed=str(future2.result().status_code))
+ return False
+ # Show creation result
+ if future1.result().status_code == 200:
+ self.__printProgression__('In progress',
+ key + ' creation: created',
+ eol='\r')
+ else:
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed=str(future1.result().status_code))
+ return False
+
+ # Wait for puppet catalog to be applied
+ loop_stop = False
+ while not loop_stop:
+ status = self[key].getStatus()
+ if status == 'No Changes' or status == 'Active':
+ self.__printProgression__(True,
+ key + ' creation: provisioning OK')
+ loop_stop = True
+ elif status == 'Error':
+ self.__printProgression__(False,
+ key + ' creation: Error',
+ failed="Error during provisioning")
+ loop_stop = True
+ return False
+ else:
+ self.__printProgression__('In progress',
+ key + ' creation: provisioning ({})'
+ .format(status),
+ eol='\r')
+ time.sleep(5)
+
+ return True
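+
+# Usage sketch (the attributes payload shape is an assumption; see the
+# foreman API v2 docs for host creation):
+#   hosts = Hosts(api)
+#   hosts.createVM('compute1', {'host': {'name': 'compute1'}},
+#                  printHostProgress=True)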
diff --git a/opensteak/tools/opensteak/foreman_objects/item.py b/opensteak/tools/opensteak/foreman_objects/item.py
new file mode 100644
index 0000000..f418f8c
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/item.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# David Blaisonneau <david.blaisonneau@orange.com>
+# Arnaud Morin <arnaud1.morin@orange.com>
+
+
+
+class ForemanItem(dict):
+ """
+ Item class
+ Represent the content of a foreman object as a dict
+ """
+
+ def __init__(self, api, key,
+ objName, payloadObj,
+ *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ self.api = api
+ self.key = key
+ if objName:
+ self.objName = objName
+ if payloadObj:
+ self.payloadObj = payloadObj
+ self.store = dict()
+ if args and args[0]:
+ self.update(dict(*args, **kwargs))
+ # We get the smart class parameters for the good items
+ if objName in ['hosts', 'hostgroups',
+ 'puppet_classes', 'environments']:
+ from opensteak.foreman_objects.itemSmartClassParameter\
+ import ItemSmartClassParameter
+ scp_ids = map(lambda x: x['id'],
+ self.api.list('{}/{}/smart_class_parameters'
+ .format(self.objName, key)))
+ scp_items = list(map(lambda x: ItemSmartClassParameter(self.api, x,
+ self.api.get('smart_class_parameters', x)),
+ scp_ids))
+ scp = {'{}::{}'.format(x['puppetclass']['name'],
+ x['parameter']): x
+ for x in scp_items}
+ self.update({'smart_class_parameters_dict': scp})
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+ @param attribute: The data
+ @return RETURN: The API result
+ """
+ if key == 'puppetclass_ids':
+ payload = {"puppetclass_id": attributes,
+ self.payloadObj + "_class":
+ {"puppetclass_id": attributes}}
+ return self.api.create("{}/{}/{}"
+ .format(self.objName,
+ self.key,
+ "puppetclass_ids"),
+ payload)
+ elif key == 'parameters':
+ payload = {"parameter": attributes}
+ return self.api.create("{}/{}/{}"
+ .format(self.objName,
+ self.key,
+ "parameters"),
+ payload)
+ else:
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set(self.objName, self.key, payload)
+
+ def getParam(self, name=None):
+ """ Function getParam
+ Return a dict of parameters or a single parameter value
+
+ @param name: The parameter name (None to get all of them)
+ @return RETURN: dict of parameters, a parameter value or False
+ """
+ if 'parameters' not in self.keys():
+ return False
+ params = {x['name']: x['value'] for x in self['parameters']}
+ if name:
+ return params.get(name, False)
+ return params
+
+ def checkAndCreateClasses(self, classes):
+ """ Function checkAndCreateClasses
+ Check that the puppet classes are linked, add the missing ones
+
+ @param classes: The class ids list
+ @return RETURN: boolean
+ """
+ actual_classes = self['puppetclass_ids']
+ for v in classes:
+ if v not in actual_classes:
+ self['puppetclass_ids'] = v
+ return sorted(classes) == sorted(self['puppetclass_ids'])
+
+ def checkAndCreateParams(self, params):
+ """ Function checkAndCreateParams
+ Check that the global parameters are set, add the missing ones
+
+ @param params: The params dict
+ @return RETURN: boolean
+ """
+ actual_params = self['param_ids']
+ for k, v in params.items():
+ if k not in actual_params:
+ self['parameters'] = {"name": k, "value": v}
+ return sorted(self['param_ids']) == sorted(params.keys())
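As a quick illustration of the write path implemented by __setitem__ above: assigning to a ForemanItem key issues an API call rather than a local mutation. The 'api' object and the ids below are assumptions for illustration.

    # Sketch, assuming 'api' is the Foreman API wrapper of this patch.
    host = ForemanItem(api, 42, 'hosts', 'host', api.get('hosts', 42))
    env = host['environment_id']       # local read of the cached dict
    host['environment_id'] = 3         # remote write of {'host': {...}}
    # 'parameters' and 'puppetclass_ids' take the dedicated branches
    host['parameters'] = {'name': 'role', 'value': 'compute'}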
diff --git a/opensteak/tools/opensteak/foreman_objects/itemHost.py b/opensteak/tools/opensteak/foreman_objects/itemHost.py
new file mode 100644
index 0000000..c531e5c
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemHost.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import base64
+from string import Template
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemHost(ForemanItem):
+ """
+ ItemHost class
+ Represent the content of a foreman host as a dict
+ """
+
+ objName = 'hosts'
+ payloadObj = 'host'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'puppetclass_ids':
+ self.api.list('{}/{}/puppetclass_ids'
+ .format(self.objName, key))})
+ self.update({'param_ids':
+ list(self.api.list('{}/{}/parameters'
+ .format(self.objName, key),
+ only_id=True)
+ .keys())})
+
+ def getStatus(self):
+ """ Function getStatus
+ Get the status of a host
+
+ @return RETURN: The host status
+ """
+ return self.api.get('hosts', self.key, 'status')['status']
+
+ def powerOn(self):
+ """ Function powerOn
+ Power on a host
+
+ @return RETURN: The API result
+ """
+ # Hosts.createVM drives this call asynchronously and expects a
+ # future, so default to async when the attribute was never set
+ return self.api.set('hosts', self.key,
+ {"power_action": "start"},
+ 'power', async=getattr(self, 'async', True))
+
+ def getParamFromEnv(self, var, default=''):
+ """ Function getParamFromEnv
+ Search a parameter in the host environment
+
+ @param var: the var name
+ @param hostgroup: the hostgroup item linked to this host
+ @param default: default value
+ @return RETURN: the value
+ """
+ if self.getParam(var):
+ return self.getParam(var)
+ if self.hostgroup:
+ if self.hostgroup.getParam(var):
+ return self.hostgroup.getParam(var)
+ if self.domain.getParam(var):
+ return self.domain.getParam(var)
+ return default
+
+ def getUserData(self,
+ hostgroup,
+ domain,
+ defaultPwd='',
+ defaultSshKey='',
+ proxyHostname='',
+ tplFolder='templates_metadata/'):
+ """ Function getUserData
+ Generate a userdata script for metadata server from Foreman API
+
+ @param domain: the domain item linked to this host
+ @param hostgroup: the hostgroup item linked to this host
+ @param defaultPwd: the default password if no password is specified
+ in the host>hostgroup>domain params
+ @param defaultSshKey: the default ssh key if no password is specified
+ in the host>hostgroup>domain params
+ @param proxyHostname: hostname of the smartproxy
+ @param tplFolder: the templates folder
+ @return RETURN: the user data
+ """
+ if 'user-data' in self.keys():
+ return self['user-data']
+ else:
+ self.hostgroup = hostgroup
+ self.domain = domain
+ if proxyHostname == '':
+ # the domain item is a dict; use its name for the proxy fqdn
+ proxyHostname = 'foreman.' + domain['name']
+ password = self.getParamFromEnv('password', defaultPwd)
+ sshauthkeys = self.getParamFromEnv('global_sshkey', defaultSshKey)
+ with open(tplFolder+'puppet.conf', 'r') as puppet_file:
+ p = MyTemplate(puppet_file.read())
+ enc_puppet_file = base64.b64encode(p.substitute(
+ foremanHostname=proxyHostname).encode('utf-8'))
+ with open(tplFolder+'cloud-init.tpl', 'r') as content_file:
+ s = MyTemplate(content_file.read())
+ if sshauthkeys:
+ sshauthkeys = ' - '+sshauthkeys
+ self.userdata = s.substitute(
+ password=password,
+ fqdn=self['name'],
+ sshauthkeys=sshauthkeys,
+ foremanurlbuilt="http://{}/unattended/built"
+ .format(proxyHostname),
+ puppet_conf_content=enc_puppet_file.decode('utf-8'))
+ return self.userdata
+
+
+class MyTemplate(Template):
+ delimiter = '%'
+ idpattern = r'[a-z][_a-z0-9]*'
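MyTemplate switches string.Template to a '%' delimiter so that the cloud-init and puppet.conf templates can keep literal '$' characters. A standalone check of that behaviour, with hypothetical values:

    # '%' is the delimiter, '$' is left untouched
    tpl = MyTemplate('password: %password\nkey: $literal')
    print(tpl.substitute(password='secret'))
    # password: secret
    # key: $literal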
diff --git a/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py
new file mode 100644
index 0000000..d6a641c
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemHostsGroup(ForemanItem):
+ """
+ ItemHostsGroup class
+ Represent the content of a foreman hostgroup as a dict
+ """
+
+ objName = 'hostgroups'
+ payloadObj = 'hostgroup'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'puppetclass_ids':
+ self.api.list('{}/{}/puppetclass_ids'
+ .format(self.objName, key))})
+ self.update({'param_ids':
+ list(self.api.list('{}/{}/parameters'
+ .format(self.objName, key),
+ only_id=True)
+ .keys())})
diff --git a/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py
new file mode 100644
index 0000000..936185e
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemOverrideValues(ForemanItem):
+ """
+ ItemOverrideValues class
+ Represent the content of a foreman smart class parameter as a dict
+ """
+
+ objName = 'override_values'
+ payloadObj = 'override_value'
+
+ def __init__(self, api, key, parentName, parentKey, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param parentName: The object parent name (eg: smart_class_parameter)
+ @param parentKey: The object parent key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ self.parentName = parentName
+ self.parentKey = parentKey
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+ @param attribute: The data
+ @return RETURN: The API result
+ """
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set('{}/{}/{}'.format(self.parentName,
+ self.parentKey,
+ self.objName),
+ self.key, payload)
diff --git a/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py
new file mode 100644
index 0000000..2d7ca2a
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+
+from opensteak.foreman_objects.item import ForemanItem
+from opensteak.foreman_objects.itemOverrideValues import ItemOverrideValues
+
+
+class ItemSmartClassParameter(ForemanItem):
+ """
+ ItemSmartClassParameter class
+ Represent the content of a foreman smart class parameter as a dict
+ """
+
+ objName = 'smart_class_parameters'
+ payloadObj = 'smart_class_parameter'
+
+ def __init__(self, api, key, *args, **kwargs):
+ """ Function __init__
+ Represent the content of a foreman object as a dict
+
+ @param api: The foreman api
+ @param key: The object Key
+ @param *args, **kwargs: the dict representation
+ @return RETURN: Itself
+ """
+ ForemanItem.__init__(self, api, key,
+ self.objName, self.payloadObj,
+ *args, **kwargs)
+ self.update({'override_values':
+ list(map(lambda x: ItemOverrideValues(self.api,
+ x['id'],
+ self.objName,
+ key,
+ x),
+ self['override_values']))})
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+ Set a parameter of a foreman object as a dict
+
+ @param key: The key to modify
+ @param attribute: The data
+ @return RETURN: The API result
+ """
+ payload = {self.payloadObj: {key: attributes}}
+ return self.api.set(self.objName, self.key, payload)
diff --git a/opensteak/tools/opensteak/foreman_objects/objects.py b/opensteak/tools/opensteak/foreman_objects/objects.py
new file mode 100644
index 0000000..c20c5a1
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/objects.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ForemanObjects:
+ """
+ ForemanObjects class
+ Parent class for Foreman Objects
+ """
+
+ def __init__(self, api, objName=None, payloadObj=None):
+ """ Function __init__
+ Init the foreman object
+
+ @param api: The foreman API
+ @param objName: The object name (linked with the Foreman API)
+ @param payloadObj: The object name inside the payload (in general
+ the singular of objName)
+ @return RETURN: Itself
+ """
+
+ self.api = api
+ if objName:
+ self.objName = objName
+ if payloadObj:
+ self.payloadObj = payloadObj
+ # For asynchronous creations
+ self.async = False
+
+ def __iter__(self):
+ """ Function __iter__
+
+ @return RETURN: The iteration of objects list
+ """
+ return iter(self.list())
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+
+ @param key: The targeted object
+ @return RETURN: A ForemanItem
+ """
+ return ForemanItem(self.api,
+ key,
+ self.objName,
+ self.payloadObj,
+ self.api.get(self.objName, key))
+
+ def __setitem__(self, key, attributes):
+ """ Function __setitem__
+
+ @param key: The targeted object
+ @param attributes: The attributes to apply to the object
+ @return RETURN: API result if the object was not present, or False
+ """
+ if key not in self:
+ payload = {self.payloadObj: {'name': key}}
+ payload[self.payloadObj].update(attributes)
+ return self.api.create(self.objName, payload, async=self.async)
+ return False
+
+ def __delitem__(self, key):
+ """ Function __delitem__
+
+ @return RETURN: API result
+ """
+ return self.api.delete(self.objName, key)
+
+ def __contains__(self, key):
+ """ Function __contains__
+
+ @param key: The targeted object
+ @return RETURN: True if the object exists
+ """
+ return key in self.listName()
+
+ def getId(self, key):
+ """ Function getId
+ Get the id of an object
+
+ @param key: The targeted object
+ @return RETURN: The ID
+ """
+ return self.api.get_id_by_name(self.objName, key)
+
+ def list(self, limit=20):
+ """ Function list
+ Get the list of all objects
+
+ @param limit: The limit of items to return
+ @return RETURN: A ForemanItem list
+ """
+ return list(map(lambda x:
+ ForemanItem(self.api, x['id'],
+ self.objName, self.payloadObj,
+ x),
+ self.api.list(self.objName, limit=limit)))
+
+ def listName(self):
+ """ Function listName
+ Get the list of all objects name with Ids
+
+ @return RETURN: A dict of object name:id
+ """
+ return self.api.list(self.objName, limit=999999, only_id=True)
+
+ def checkAndCreate(self, key, payload):
+ """ Function checkAndCreate
+ Check if an object exists and create it if not
+
+ @param key: The targeted object
+ @param payload: The targeted object description
+ @return RETURN: The id of the object
+ """
+ if key not in self:
+ self[key] = payload
+ return self[key]['id']
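The class behaves like a mapping over the remote API. A short sketch of that protocol; the 'api' object and the 'domains' payloads are assumptions for illustration:

    # Sketch, assuming 'api' is the Foreman API wrapper of this patch.
    domains = ForemanObjects(api, 'domains', 'domain')
    if 'infra.local' not in domains:     # __contains__ via listName()
        domains['infra.local'] = {}      # __setitem__ -> api.create
    oid = domains.checkAndCreate('infra.local', {})  # idempotent, returns id
    del domains['infra.local']           # __delitem__ -> api.delete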
diff --git a/opensteak/tools/opensteak/foreman_objects/operatingsystems.py b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py
new file mode 100644
index 0000000..8cce606
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py
@@ -0,0 +1,66 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class OperatingSystems(ForemanObjects):
+ """
+ OperatingSystems class
+ """
+ objName = 'operatingsystems'
+ payloadObj = 'operatingsystem'
+
+ def __getitem__(self, key):
+ """ Function __getitem__
+
+ @param key: The operating system id/name
+ @return RETURN: The item
+ """
+ ret = self.api.list(self.objName,
+ filter='title = "{}"'.format(key))
+ if len(ret):
+ return ForemanItem(self.api, key,
+ self.objName, self.payloadObj,
+ ret[0])
+ else:
+ return None
+
+ def __setitem__(self, key, attributes):
+ """ Function __getitem__
+
+ @param key: The operating system id/name
+ @param attributes: The content of the operating system to create
+ @return RETURN: The API result
+ """
+ if key not in self:
+ payload = {self.payloadObj: {'title': key}}
+ payload[self.payloadObj].update(attributes)
+ return self.api.create(self.objName, payload)
+ return False
+
+ def listName(self):
+ """ Function listName
+ Get the list of all objects name with Ids
+
+ @return RETURN: A dict of object name:id
+ """
+ return { x['title']: x['id'] for x in self.api.list(self.objName,
+ limit=999999)}
diff --git a/opensteak/tools/opensteak/foreman_objects/puppetClasses.py b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py
new file mode 100644
index 0000000..7f397f2
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class PuppetClasses(ForemanObjects):
+ """
+ PuppetClasses class
+ """
+ objName = 'puppetclasses'
+ payloadObj = 'puppetclass'
+
+ def list(self, limit=20):
+ """ Function list
+ Get the list of all objects
+
+ @param limit: The limit of items to return
+ @return RETURN: A ForemanItem list
+ """
+ puppetClassList = list()
+ for v in self.api.list(self.objName, limit=limit).values():
+ puppetClassList.extend(v)
+ return list(map(lambda x:
+ ForemanItem(self.api, x['id'],
+ self.objName, self.payloadObj,
+ x),
+ puppetClassList))
diff --git a/opensteak/tools/opensteak/foreman_objects/smart_proxies.py b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py
new file mode 100644
index 0000000..2d6518b
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class SmartProxies(ForemanObjects):
+ """
+ SmartProxies class
+ """
+ objName = 'smart_proxies'
+ payloadObj = 'smart_proxy'
+
+ def importPuppetClasses(self, smartProxyId):
+ """ Function importPuppetClasses
+ Force the reload of puppet classes
+
+ @param smartProxyId: smartProxy Id
+ @return RETURN: the API result
+ """
+ return self.api.create('smart_proxies/{}/import_puppetclasses'.format(smartProxyId), '{}')
diff --git a/opensteak/tools/opensteak/foreman_objects/subnets.py b/opensteak/tools/opensteak/foreman_objects/subnets.py
new file mode 100644
index 0000000..b1cac54
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/subnets.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Subnets(ForemanObjects):
+ """
+ Subnets class
+ """
+ objName = 'subnets'
+ payloadObj = 'subnet'
+
+ def checkAndCreate(self, key, payload, domainId):
+ """ Function checkAndCreate
+ Check if a subnet exists and create it if not
+
+ @param key: The targeted subnet
+ @param payload: The targeted subnet description
+ @param domainId: The domainId to be attached with the subnet
+ @return RETURN: The id of the subnet
+ """
+ if key not in self:
+ self[key] = payload
+ oid = self[key]['id']
+ if not oid:
+ return False
+ # Ensure the subnet contains the domain
+ subnetDomainIds = []
+ for domain in self[key]['domains']:
+ subnetDomainIds.append(domain['id'])
+ if domainId not in subnetDomainIds:
+ subnetDomainIds.append(domainId)
+ self[key]["domain_ids"] = subnetDomainIds
+ if len(self[key]["domains"]) is not len(subnetDomainIds):
+ return False
+ return oid
+
+ def removeDomain(self, subnetId, domainId):
+ """ Function removeDomain
+ Delete a domain from a subnet
+
+ @param subnetId: The subnet Id
+ @param domainId: The domainId to be detached from the subnet
+ @return RETURN: boolean
+ """
+ subnetDomainIds = []
+ for domain in self[subnetId]['domains']:
+ subnetDomainIds.append(domain['id'])
+ subnetDomainIds.remove(domainId)
+ self[subnetId]["domain_ids"] = subnetDomainIds
+ return len(self[subnetId]["domains"]) == len(subnetDomainIds)
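A sketch of the intended call sequence, assuming an 'api' wrapper and a hypothetical Foreman subnet payload:

    subnets = Subnets(api)
    payload = {'network': '192.168.1.0', 'mask': '255.255.255.0'}
    sid = subnets.checkAndCreate('Admin', payload, domainId=1)
    # checkAndCreate is idempotent; removeDomain detaches the domain again
    subnets.removeDomain(sid, 1)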
diff --git a/opensteak/tools/opensteak/printer.py b/opensteak/tools/opensteak/printer.py
new file mode 100644
index 0000000..98c5af5
--- /dev/null
+++ b/opensteak/tools/opensteak/printer.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+import sys
+
+
+class OpenSteakPrinter:
+ """ Just a nice message printer """
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+
+ TABSIZE = 4
+
+ def header(self, msg):
+ """ Function header
+ Print a header for a block
+
+ @param msg: The message to print in the header (limited to 78 chars)
+ @return RETURN: None
+ """
+ print("""
+#
+# {}
+#
+""".format(msg[0:78]))
+
+ def config(self, msg, name, value=None, indent=0):
+ """ Function config
+ Print a line with the value of a parameter
+
+ @param msg: The message to print in the header (limited to 78 chars)
+ @param name: The name of the parameter
+ @param value: The value of the parameter
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ ind = ' ' * indent * self.TABSIZE
+ if value is None:
+ print('{} - {} = {}'.format(ind, msg, name))
+ elif value is False:
+ print('{} [{}KO{}] {} > {} (NOT found)'.
+ format(ind, self.FAIL, self.ENDC, msg, name))
+ else:
+ print('{} [{}OK{}] {} > {} = {}'.
+ format(ind, self.OKGREEN, self.ENDC, msg, name, str(value)))
+
+ def list(self, msg, indent=0):
+ """ Function list
+ Print a list item
+
+ @param msg: The message to print in the header (limited to 78 chars)
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ print(' ' * indent * self.TABSIZE, '-', msg)
+
+ def list_id(self, dic, indent=0):
+ """ Function list_id
+ Print a list of dict items
+
+ @param dic: The dict to print
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ for (k, v) in dic.items():
+ self.list("{}: {}".format(k, v), indent=indent)
+
+ def status(self, res, msg, failed="", eol="\n", quit=True, indent=0):
+ """ Function status
+ Print status message
+ - OK/KO if the result is a boolean
+ - Else the result text
+
+ @param res: The status to show
+ @param msg: The message to show
+ @param failed: Failure details, appended on a new line
+ @param eol: End of line
+ @param quit: Exit the system in case of failure
+ @param indent: Tab size at the beginning of the line
+ @return RETURN: None
+ """
+ ind = ' ' * indent * self.TABSIZE
+ if res is True:
+ msg = '{} [{}OK{}] {}'.format(ind, self.OKGREEN, self.ENDC, msg)
+ elif res:
+ msg = '{} [{}{}{}] {}'.format(ind, self.OKBLUE, res,
+ self.ENDC, msg)
+ else:
+ msg = '{} [{}KO{}] {}'.format(ind, self.FAIL, self.ENDC, msg)
+ if failed:
+ msg += '\n > {}'.format(failed)
+ msg = msg.ljust(140) + eol
+ sys.stdout.write(msg)
+ if res is False and quit is True:
+ sys.exit(0)
+
+ def ask_validation(self, prompt=None, resp=False):
+ """ Function ask_validation
+ Ask a validation message
+
+ @param prompt: The question to ask ('Continue ?' if None)
+ @param resp: The default answer (False if not given)
+ @return RETURN: True, or exit if the answer is no
+ """
+ if prompt is None:
+ prompt = 'Continue ?'
+ if resp:
+ prompt += ' [{}Y{}/n]: '.format(self.BOLD, self.ENDC)
+ else:
+ prompt += ' [y/{}N{}]: '.format(self.BOLD, self.ENDC)
+ while True:
+ ans = input(prompt)
+ if not ans:
+ ans = 'y' if resp else 'n'
+ if ans not in ['y', 'Y', 'n', 'N']:
+ print('please enter y or n.')
+ continue
+ if ans == 'y' or ans == 'Y':
+ return True
+ if ans == 'n' or ans == 'N':
+ sys.exit(0)
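A short usage sketch of the printer; all messages are hypothetical:

    p = OpenSteakPrinter()
    p.header('Deploy the Foreman VM')
    p.config('Parameter', 'domain', 'infra.local', indent=1)
    p.status('In progress', 'booting', eol='\r')  # non-boolean -> blue tag
    p.status(True, 'DNS checked', indent=1)
    if p.ask_validation('Create the VM ?', resp=True):
        p.status(True, 'VM created')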
diff --git a/opensteak/tools/opensteak/templateparser.py b/opensteak/tools/opensteak/templateparser.py
new file mode 100644
index 0000000..720f008
--- /dev/null
+++ b/opensteak/tools/opensteak/templateparser.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Template parser
+"""
+
+from string import Template
+
+class OpenSteakTemplateParser:
+
+ def __init__(self, filein, fileout, dictionary):
+ """
+ Parse the input file with the dictionary and write the result
+ """
+ with open(filein) as fin:
+ template = Template(fin.read())
+ with open(fileout, 'w') as fout:
+ fout.write(template.substitute(dictionary))
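For example, the kvm-config template added later in this patch could be rendered with (paths and values hypothetical):

    params = {'name': 'foreman', 'ram': 4194304, 'cpu': 4,
              'bridge': 'br0', 'bridgeconfig': ''}
    OpenSteakTemplateParser('templates_foreman/kvm-config',
                            '/tmp/foreman.xml', params)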
diff --git a/opensteak/tools/opensteak/virsh.py b/opensteak/tools/opensteak/virsh.py
new file mode 100644
index 0000000..594b842
--- /dev/null
+++ b/opensteak/tools/opensteak/virsh.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+"""
+Virsh library
+"""
+
+import subprocess
+import os
+
+class OpenSteakVirsh:
+
+ virsh = "/usr/bin/virsh"
+ genisoimage = "/usr/bin/genisoimage"
+ environment = ""
+
+ ###
+ # INIT
+ ###
+ def __init__(self):
+ self.environment = dict(os.environ) # Copy current environment
+ self.environment['LANG'] = 'en_US.UTF-8'
+
+
+ ###
+ # VOLUMES
+ ###
+ def volumeList(self, pool="default"):
+ """
+ Return all volumes from a pool
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-list", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ # Split lines
+ lines = stdout.splitlines()
+
+ # For each line, split on whitespace and build a dictionary
+ newLines = {}
+ for line in lines:
+ name, path = line.split(maxsplit=1)
+ newLines[name.strip()] = path.strip()
+
+ return newLines
+
+ def volumeDelete(self, path):
+ """
+ Delete a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-delete", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def volumeClone(self, origin, name, pool="default"):
+ """
+ Clone a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-clone", "--pool", pool, origin, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def volumeResize(self, name, size, pool="default"):
+ """
+ Resize a volume
+ """
+ p = subprocess.Popen([self.virsh, "-q", "vol-resize", "--pool", pool, name, size], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # POOLS
+ ###
+ def poolRefresh(self, pool="default"):
+ """
+ Refresh a pool
+ """
+ p = subprocess.Popen([self.virsh, "-q", "pool-refresh", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # DOMAINS
+ ###
+ def domainList(self):
+ """
+ Return all domains (VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "list", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ # Split lines
+ lines = stdout.splitlines()
+
+ # For each line, split on whitespace and build a dictionary
+ newLines = {}
+ for line in lines:
+ id, name, status = line.split(maxsplit=2)
+ newLines[name.strip()] = status.strip()
+
+ return newLines
+
+ def domainDefine(self, xml):
+ """
+ Define a domain (create a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "define", xml], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainUndefine(self, name):
+ """
+ Undefine a domain (delete a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "undefine", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainStart(self, name):
+ """
+ Start a domain (boot a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "start", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ def domainDestroy(self, name):
+ """
+ Destroy a domain (stop a VM)
+ """
+ p = subprocess.Popen([self.virsh, "-q", "destroy", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
+ ###
+ # ISO
+ ###
+ def generateConfiguration(self, name, files):
+ """
+ Generate an ISO file
+ """
+
+ commandArray = [self.genisoimage, "-quiet", "-o", "/var/lib/libvirt/images/{0}-configuration.iso".format(name), "-volid", "cidata", "-joliet", "-rock"]
+ for f in files.values():
+ commandArray.append(f)
+
+ # Generate the iso file
+ p = subprocess.Popen(commandArray, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment)
+ stdout, stderr = p.communicate()
+
+ return {"stdout":stdout, "stderr":stderr}
+
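A sketch of the clone-and-boot sequence these wrappers are built for; image and domain names are hypothetical:

    v = OpenSteakVirsh()
    v.volumeClone('trusty-server.qcow2', 'foreman.qcow2')
    v.volumeResize('foreman.qcow2', '20G')
    v.poolRefresh()
    res = v.domainDefine('/tmp/foreman.xml')  # XML from the kvm-config template
    if not res['stderr']:
        v.domainStart('foreman')
    print(v.domainList())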
diff --git a/opensteak/tools/templates_foreman/install.sh b/opensteak/tools/templates_foreman/install.sh
new file mode 100644
index 0000000..497be86
--- /dev/null
+++ b/opensteak/tools/templates_foreman/install.sh
@@ -0,0 +1,216 @@
+#!/bin/sh
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau <david.blaisonneau@orange.com>
+# @author: Arnaud Morin <arnaud1.morin@orange.com>
+
+### Set vars
+NAME="${name}"
+DOMAIN="${domain}"
+DATEE=$$(date +%F-%Hh%M)
+IP="${ip}"
+MASK="${netmaskshort}"
+NET="${network}"
+DHCP_RANGE="${dhcprange}"
+REVERSE_DNS="${reversedns}"
+DNS_FORWARDER="${dns}"
+ADMIN="${admin}"
+PASSWORD="${password}"
+
+### Set correct env
+#dpkg-reconfigure locales
+export LC_CTYPE=en_US.UTF-8
+export LANG=en_US.UTF-8
+unset LC_ALL
+umask 0022
+
+### Check hostname is on the public interface
+echo "* Ensure hostname point to external IP"
+# Remove useless lines
+perl -i -pe 's/^127.0.1.1.*\n$$//' /etc/hosts
+perl -i -pe "s/^$${IP}.*\n$$//" /etc/hosts
+# Append a line
+echo "$${IP} $${NAME}.$${DOMAIN} $${NAME}" >> /etc/hosts
+
+### Dependencies
+echo "* Install dependencies"
+apt-get -y install ca-certificates wget git isc-dhcp-server
+
+### Set AppArmor
+echo "* Set App armor"
+cat /etc/apparmor.d/local/usr.sbin.dhcpd | grep '/etc/bind/rndc.key r,' >/dev/null
+if [ $$? -eq 1 ] ; then
+ echo "/etc/bind/rndc.key r," >> /etc/apparmor.d/local/usr.sbin.dhcpd
+fi
+
+### Prepare repos
+echo "* Enable Puppet labs repo"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetlabs-release')" ] ; then
+ wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb
+ dpkg -i puppetlabs-release-trusty.deb
+ apt-get update
+fi
+
+# Install puppetmaster
+echo "* Install puppetmaster"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetmaster')" ] ; then
+ apt-get -y install puppetmaster
+fi
+
+# Enable the Foreman repo
+echo "* Enable Foreman repo"
+if [ ! -e /etc/apt/sources.list.d/foreman.list ] ; then
+ echo "deb http://deb.theforeman.org/ trusty 1.8" > /etc/apt/sources.list.d/foreman.list
+ echo "deb http://deb.theforeman.org/ plugins 1.8" >> /etc/apt/sources.list.d/foreman.list
+ wget -q http://deb.theforeman.org/pubkey.gpg -O- | apt-key add -
+ apt-get update
+fi
+
+### Install Foreman
+echo "* Install foreman-installer"
+if [ "Z" = "Z$$(dpkg -l |grep 'ii foreman-installer')" ] ; then
+ apt-get -y install foreman-installer
+fi
+if [ "Z" = "Z$$(gem list --local |grep rubyipmi)" ] ; then
+ gem install -q rubyipmi
+fi
+
+### Execute foreman installer
+echo "* Execute foreman installer"
+
+foreman-installer \
+ --foreman-admin-username="$$ADMIN" \
+ --foreman-admin-password="$$PASSWORD" \
+ --enable-foreman-plugin-templates \
+ --enable-foreman-plugin-discovery \
+ --foreman-plugin-discovery-install-images=true \
+ --enable-foreman-compute-libvirt
+
+
+foreman-installer \
+ --foreman-admin-username="$$ADMIN" \
+ --foreman-admin-password="$$PASSWORD" \
+ --enable-foreman-plugin-templates \
+ --enable-foreman-plugin-discovery \
+ --foreman-plugin-discovery-install-images=true \
+ --enable-foreman-compute-libvirt \
+ --enable-foreman-proxy \
+ --foreman-proxy-bmc=true \
+ --foreman-proxy-tftp=true \
+ --foreman-proxy-tftp-servername="$$IP" \
+ --foreman-proxy-dhcp=true \
+ --foreman-proxy-dhcp-interface="eth0" \
+ --foreman-proxy-dhcp-gateway="$$IP" \
+ --foreman-proxy-dhcp-range="$$DHCP_RANGE" \
+ --foreman-proxy-dhcp-nameservers="$$IP" \
+ --foreman-proxy-dns=true \
+ --foreman-proxy-dns-interface="eth0" \
+ --foreman-proxy-dns-zone="$$DOMAIN" \
+ --foreman-proxy-dns-reverse="$$REVERSE_DNS" \
+ --foreman-proxy-dns-forwarders="$$DNS_FORWARDER" \
+ --foreman-proxy-foreman-base-url="https://localhost"
+
+### Sync community templates for last ubuntu versions
+
+echo "* Sync community templates for last ubuntu versions"
+foreman-rake templates:sync
+
+### Get and install OpenSteak files
+
+echo "* Get OpenSteak repos"
+if [ -d /usr/local/opensteak ] ; then
+ cd /usr/local/opensteak
+ git pull
+else
+ cd /usr/local/
+ git clone https://github.com/Orange-OpenSource/opnfv.git -b foreman opensteak
+fi
+cd /usr/local/opensteak/infra/puppet_master
+
+echo "* Set puppet auth"
+echo "*.$$DOMAIN" > /etc/puppet/autosign.conf
+if [ -e /etc/puppet/auth.conf ] ; then
+ # Make a backup
+ mv /etc/puppet/auth.conf /etc/puppet/auth.conf.$$DATEE
+fi
+cp etc/puppet/auth.conf /etc/puppet/auth.conf
+perl -i -pe "s/__NET__/$$NET/" /etc/puppet/auth.conf
+perl -i -pe "s/__MASK__/$$MASK/" /etc/puppet/auth.conf
+
+# Set Hiera Conf
+echo "* Push Hiera conf into /etc/puppet/"
+if [ -e /etc/puppet/hiera.yaml ] ; then
+ # Make a backup
+ mv /etc/puppet/hiera.yaml /etc/puppet/hiera.yaml.$$DATEE
+fi
+cp etc/puppet/hiera.yaml /etc/puppet/hiera.yaml
+if [ -e /etc/hiera.yaml ] ; then
+ rm /etc/hiera.yaml
+fi
+ln -s /etc/puppet/hiera.yaml /etc/hiera.yaml
+cp -rf etc/puppet/hieradata /etc/puppet/
+rename s/DOMAIN/$$DOMAIN/ /etc/puppet/hieradata/production/nodes/*.yaml
+cp etc/puppet/manifests/site.pp /etc/puppet/manifests/site.pp
+cp ../config/common.yaml /etc/puppet/hieradata/production/common.yaml
+chgrp puppet /etc/puppet/hieradata/production/*.yaml
+
+# Install and config r10k
+echo "* Install and setup r10k"
+if [ "Z" = "Z$$(gem list --local |grep r10k)" ] ; then
+ gem install -q r10k
+fi
+if [ -e /etc/r10k.yaml ] ; then
+ # Make a backup
+ mv /etc/r10k.yaml /etc/r10k.yaml.$$DATEE
+fi
+cp etc/r10k.yaml /etc/r10k.yaml
+
+# Install opensteak-r10k-update script
+echo "* Install opensteak-r10k-update script into /usr/local/bin"
+cp usr/local/bin/opensteak-r10k-update /usr/local/bin/opensteak-r10k-update
+chmod +x /usr/local/bin/opensteak-r10k-update
+
+echo "* Run R10k. You can re-run r10k by calling:"
+echo " opensteak-r10k-update"
+opensteak-r10k-update
+
+#### Install VIM puppet
+echo "* Install VIM puppet"
+if [ ! -d ~/.vim/autoload ] ; then
+ mkdir -p ~/.vim/autoload
+fi
+if [ ! -d ~/.vim/bundle ] ; then
+ mkdir -p ~/.vim/bundle
+fi
+curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
+cat <<EOF > ~/.vimrc
+execute pathogen#infect()
+syntax on
+filetype plugin indent on
+EOF
+cd ~/.vim/bundle
+if [ ! -d vim-puppet ] ; then
+ git clone https://github.com/rodjek/vim-puppet.git > /dev/null
+fi
+
+### Gen SSH key for foreman
+echo "* SSH Key"
+cp /mnt/id_rsa /usr/share/foreman/.ssh/
+cp /mnt/id_rsa.pub /usr/share/foreman/.ssh/
+chown foreman:foreman /usr/share/foreman/.ssh/ -R
+
+### Run puppet
+puppet agent -t -v
+
diff --git a/opensteak/tools/templates_foreman/kvm-config b/opensteak/tools/templates_foreman/kvm-config
new file mode 100644
index 0000000..7e3d65d
--- /dev/null
+++ b/opensteak/tools/templates_foreman/kvm-config
@@ -0,0 +1,65 @@
+<domain type='kvm'>
+ <name>${name}</name>
+ <memory>${ram}</memory>
+ <currentMemory>${ram}</currentMemory>
+ <vcpu>${cpu}</vcpu>
+ <os>
+ <type arch='x86_64'>hvm</type>
+ <!-- uncomment to enable PXE boot
+ <boot dev='network'/>
+ -->
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/><apic/><pae/>
+ </features>
+ <clock offset="utc"/>
+ <on_poweroff>preserve</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/${name}'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source file='/var/lib/libvirt/images/${name}-configuration.iso'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
+ <input type='mouse' bus='ps2'/>
+ <!-- uncomment to allow virsh console
+ <console type='pty'/>
+ <!- - end -->
+ <!-- uncomment to allow console to a log file -->
+ <serial type='file'>
+ <source path='/var/log/libvirt/qemu/${name}-serial.log'/>
+ <target port='0'/>
+ <alias name='serial0'/>
+ </serial>
+ <serial type='pty'>
+ <source path='/dev/pts/1'/>
+ <target port='1'/>
+ <alias name='serial1'/>
+ </serial>
+ <console type='file'>
+ <source path='/var/log/libvirt/qemu/${name}-serial.log'/>
+ <target type='serial' port='0'/>
+ <alias name='serial0'/>
+ </console>
+ <!-- end -->
+ <graphics type='spice' port='-1' autoport='no'/>
+ <video>
+ <model type='qxl' ram='65536' vram='65536' heads='1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+ </video>
+ <memballoon model='virtio'/>
+ <interface type='bridge'>
+ <source bridge='${bridge}'/>
+ ${bridgeconfig}
+ <model type='virtio'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/opensteak/tools/templates_foreman/meta-data b/opensteak/tools/templates_foreman/meta-data
new file mode 100644
index 0000000..b4cb9b6
--- /dev/null
+++ b/opensteak/tools/templates_foreman/meta-data
@@ -0,0 +1,12 @@
+instance-id: ${name};
+network-interfaces: |
+ auto lo
+ iface lo inet loopback
+ auto eth0
+ iface eth0 inet static
+ address ${ip}
+ netmask ${netmaskshort}
+ gateway ${gateway}
+ dns-nameservers ${dns}
+ dns-search ${domain}
+local-hostname: ${name}
diff --git a/opensteak/tools/templates_foreman/user-data b/opensteak/tools/templates_foreman/user-data
new file mode 100644
index 0000000..281b5d4
--- /dev/null
+++ b/opensteak/tools/templates_foreman/user-data
@@ -0,0 +1,25 @@
+#cloud-config
+#############################################
+# OPENSTEAK VM '${name}'
+#############################################
+password: ${password}
+chpasswd: { expire: False }
+ssh_pwauth: True
+dsmode: net
+hostname: ${name}
+#############################################
+# FIRST BOOT COMMAND
+# - reload main interface
+# - install puppet from puppetlabs
+# - remove cloud-init
+#############################################
+runcmd:
+ - [ sh, -c, "mount /dev/vdb /mnt"]
+ - [ sh, -c, "sudo bash /mnt/install.sh"]
+# This is the id_rsa.sansmotdepasse key
+ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGxilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3rh+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBht+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D sansmotdepasse
+#############################################
+# FINAL MESSAGE AT END OF BOOT
+#############################################
+final_message: "The system '${name}' is finally up, after $$UPTIME seconds"