From 5cc4c1455698767349e88022449c0c00d0e61239 Mon Sep 17 00:00:00 2001
From: ChristopherPrice
Date: Fri, 5 Jun 2015 15:49:13 +0200
Subject: Corrected the "support" e-mail address.

Will need to be cherry-picked and merged to Arno stable, then updated on
the release pages once approved and generated.

JIRA: BGS-64

Change-Id: Ie6c48fb863f726e5d2331e53a5b0764f9fb479b4
Signed-off-by: ChristopherPrice
---
 common/docs/user-guide.rst | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/common/docs/user-guide.rst b/common/docs/user-guide.rst
index 8e02224..ea03f2c 100644
--- a/common/docs/user-guide.rst
+++ b/common/docs/user-guide.rst
@@ -21,14 +21,13 @@ Version history
 | **Date**           | **Ver.**           | **Author**         | **Comment**        |
 |                    |                    |                    |                    |
 +--------------------+--------------------+--------------------+--------------------+
-| 2015-05-28         | 0.0.1              | Christopher Price  | Initial version    |
+| 2015-06-04         | 1.0.0              | Christopher Price  | Initial revision   |
 |                    |                    | (Ericsson AB)      |                    |
 +--------------------+--------------------+--------------------+--------------------+
-| 2015-06-02         | 0.0.2              | Christopher Price  | Minor Updates      |
-|                    |                    | (Ericsson AB)      |                    |
+| 2015-06-05         | 1.0.1              | Christopher Price  | Corrected user     |
+|                    |                    | (Ericsson AB)      | e-mail address     |
 +--------------------+--------------------+--------------------+--------------------+
-
 .. contents:: Table of Contents
    :backlinks: none

@@ -117,7 +116,7 @@ You can engage with the community to help us improve and further develop the OPN
 - To access Jira for issue reporting or improvement proposals head to: https://jira.opnfv.org/
 - To get started helping out developing the platform head to: https://wiki.opnfv.org/developer

-Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://support@opnfv.org
+Alternatively if you are intending to invest your time as a user of the platform you can ask questions and request help from our mailing list at: mailto://opnfv-users@lists.opnfv.org

 License
 =======
@@ -149,7 +148,7 @@ Fuel `Fuel User Guide `_
 :Authors: Christopher Price (christopher.price@ericsson.com)
-:Version: 0.0.2
+:Version: 1.0.1

 **Documentation tracking**
--
cgit 1.2.3-korg


From f5b30fa73a637c93d764467336d9721060dbe15a Mon Sep 17 00:00:00 2001
From: Tim Rozet
Date: Mon, 8 Jun 2015 13:11:56 -0400
Subject: Fixes external network bridge and adds quota limits

Fixes an issue where the external network router port was being placed
on br-int; the port will now be properly added to br-ex. This patch
also increases the quota limits for Neutron.
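As a quick post-deploy sanity check (illustrative only; this assumes
openrc credentials and the ovs/neutron CLIs on a deployed controller,
none of which this patch itself provides):

    source ~/openrc
    # The router gateway port should now land on br-ex rather than br-int:
    ovs-vsctl list-ports br-ex
    # The raised limits should show up as the tenant quota defaults:
    neutron quota-show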
JIRA: BGS-65 Change-Id: I30de85e4ef241b567a90011d08c0cb4ad97fe411 Signed-off-by: Tim Rozet --- common/puppet-opnfv/manifests/controller_networker.pp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp index 157bc8f..b148ec8 100644 --- a/common/puppet-opnfv/manifests/controller_networker.pp +++ b/common/puppet-opnfv/manifests/controller_networker.pp @@ -302,6 +302,7 @@ class opnfv::controller_networker { class { "quickstack::pacemaker::neutron": agent_type => $this_agent, enable_tunneling => 'true', + external_network_bridge => 'br-ex', ml2_mechanism_drivers => $ml2_mech_drivers, ml2_network_vlan_ranges => ["physnet1:10:50"], odl_controller_ip => $odl_control_ip, @@ -309,6 +310,18 @@ class opnfv::controller_networker { ovs_tunnel_iface => $ovs_tunnel_if, ovs_tunnel_types => ["vxlan"], verbose => 'true', + neutron_conf_additional_params => { default_quota => 'default', + quota_network => '50', + quota_subnet => '50', + quota_port => 'default', + quota_security_group => '50', + quota_security_group_rule => 'default', + quota_vip => 'default', + quota_pool => 'default', + quota_router => '50', + quota_floatingip => '100', + network_auto_schedule => 'default', + }, } if ($external_network_flag != '') and str2bool($external_network_flag) { -- cgit 1.2.3-korg From e89926d7e35f58abb1a3766e07c9a9cf3b5d50de Mon Sep 17 00:00:00 2001 From: ChristopherPrice Date: Tue, 9 Jun 2015 22:17:57 +0200 Subject: Corrected links associated with release docs. To be updated along with the previous patch for the doc and cherry picked to stable/arno. JIRA: BGS-64 Change-Id: Ifb5a5d115559097d361aeca83d9b6a0807bbf3d7 Signed-off-by: ChristopherPrice --- common/docs/user-guide.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/common/docs/user-guide.rst b/common/docs/user-guide.rst index ea03f2c..08b2767 100644 --- a/common/docs/user-guide.rst +++ b/common/docs/user-guide.rst @@ -24,7 +24,7 @@ Version history | 2015-06-04 | 1.0.0 | Christopher Price | Initial revision | | | | (Ericsson AB) | | +--------------------+--------------------+--------------------+--------------------+ -| 2015-06-05 | 1.0.1 | Christopher Price | Corrected user | +| 2015-06-05 | 1.0.1 | Christopher Price | Corrected links & | | | | (Ericsson AB) | e-mail address | +--------------------+--------------------+--------------------+--------------------+ @@ -59,16 +59,16 @@ Hardware Requirements The Arno release of OPNFV is intended to be run as a baremetal deployment on a "Pharos compliant" lab infrastructure. The Pharos project in OPNFV is a community activity to provide guidance and establish requirements on hardware platforms supporting the Arno virtualisation platform. -Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: http://artifacts.opnfv.org/pharos/docs/spec.html +Prior to deploying the OPNFV platform it is important that the hardware infrastructure be configured according to the Pharos specification: https://www.opnfv.org/sites/opnfv/files/release/pharos-spec.arno.2015.1.0.pdf Arno Platform Deployment ------------------------ The Arno platform supports installation and deployment using two deployment tools; a Foreman based deployment toolchain and a Fuel based deployment toolchain. 
-In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow in the Foreman installation guide: http://artifacts.opnfv.org/genesis/foreman/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Foreman deployment toolchain you should follow in the Foreman installation guide: https://www.opnfv.org/sites/opnfv/files/release/foreman_install-guide.arno.2015.1.0.pdf

-In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow in the Fuel installation guide: http://artifacts.opnfv.org/genesis/fuel/docs/installation-instructions.html
+In order to deploy the Arno release on a Pharos compliant lab using the Fuel deployment toolchain you should follow in the Fuel installation guide: https://www.opnfv.org/sites/opnfv/files/release/install-guide.arno.2015.1.0.pdf

 Enabling or disabling OpenDaylight and the native Neutron driver
 ----------------------------------------------------------------
@@ -78,7 +78,7 @@ You may find that you wish to adjust the system by enabling or disabling the nat
 Deployment Validation
 ---------------------

-Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: http://artifacts.opnfv.org/functest/docs/functest.html
+Once installed you should validate the deployment completed successfully by executing the automated basic platform validation routines outlined in the Arno testing documentation: https://www.opnfv.org/sites/opnfv/files/release/functest.arno.2015.1.0.pdf

 Operating the Arno platform
 ===========================
--
cgit 1.2.3-korg


From eb887812da568cfb4908f6ae14449b2ceaeb5bc0 Mon Sep 17 00:00:00 2001
From: Dan Smith
Date: Fri, 12 Jun 2015 13:08:42 -0400
Subject: Adding in support for Lithium container.

Both will be present on the controller nodes and can be started/switched
on and off at will. Networking scripts are not updated; they will be
deprecated in favour of a plugin method of enabling / setting up the
VXLAN / OVSDB integration. Pulling from the latest nightly artifact at
time of commit - will work on automated pulling each night to see if
that makes sense.
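For reference, a minimal sketch of "switching" the container on and off
once the image has been staged on a controller. The paths, the bundled
docker-latest binary and the loving_daniel image tag are taken from the
staging scripts below (port mappings omitted here; stage_odl.sh shows the
full run line):

    /opt/opnfv/odl/docker-latest load -i /opt/opnfv/odl/lithium/odl_docker_image.tar
    /opt/opnfv/odl/docker-latest run --name odl_docker -d -t loving_daniel
    /opt/opnfv/odl/docker-latest stop odl_docker     # switch off
    /opt/opnfv/odl/docker-latest start odl_docker    # switch back on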
Change-Id: Ie911cdf61cd97a99b975c30c55c664daf70eb3ee JIRA: 0 Signed-off-by: Dan Smith --- .../templates/Lithium_rc0/dockerfile/Dockerfile | 82 ++++++++ .../dockerfile/container_scripts/check_feature.sh | 18 ++ .../dockerfile/container_scripts/speak.sh | 20 ++ .../start_odl_docker_container.sh | 48 +++++ fuel/build/Makefile | 2 + fuel/build/f_lith_odl_docker/Makefile | 52 +++++ fuel/build/f_lith_odl_docker/dockerfile/Dockerfile | 72 +++++++ .../dockerfile/container_scripts/check_feature.sh | 8 + .../dockerfile/container_scripts/speak.sh | 17 ++ .../container_scripts/start_odl_docker.sh | 38 ++++ .../modules/opnfv/manifests/odl_lith_docker.pp | 81 ++++++++ .../Lithium_rc0/container_scripts/check_feature.sh | 18 ++ .../Lithium_rc0/container_scripts/speak.sh | 20 ++ .../start_odl_docker_container.sh | 48 +++++ .../puppet/modules/opnfv/scripts/change.sh | 219 +++++++++++++++++++++ .../puppet/modules/opnfv/scripts/config_net_odl.sh | 192 ++++++++++++++++++ .../puppet/modules/opnfv/scripts/stage_odl.sh | 54 +++++ .../modules/opnfv/scripts/start_odl_container.sh | 95 +++++++++ .../f_lith_odl_docker/scripts/config_net_odl.sh | 164 +++++++++++++++ .../scripts/config_neutron_for_odl.sh | 146 ++++++++++++++ .../f_lith_odl_docker/scripts/prep_nets_for_odl.sh | 90 +++++++++ .../f_lith_odl_docker/scripts/setup_ovs_for_odl.sh | 23 +++ .../puppet/modules/opnfv/manifests/init.pp | 2 + 23 files changed, 1509 insertions(+) create mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile create mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh create mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh create mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh create mode 100755 fuel/build/f_lith_odl_docker/Makefile create mode 100755 fuel/build/f_lith_odl_docker/dockerfile/Dockerfile create mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh create mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh create mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh create mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp create mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh create mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh create mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh create mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh create mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh create mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh create mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh create mode 100644 fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh create mode 100644 fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh create mode 100755 fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh create mode 100644 fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile 
b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile new file mode 100644 index 0000000..6d7535d --- /dev/null +++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile @@ -0,0 +1,82 @@ +#################################################################### +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# DOCKER FILE FOR LITHIUM ODL RC0 Testing +# +############################################################################# + + +#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff +FROM ubuntu:12.04 + +# Maintainer Info +MAINTAINER Daniel Smith + + +#Run apt-get update one start just to check for updates when building +RUN echo "Updating APT" +RUN apt-get update +RUN echo "Adding wget" +RUN apt-get install -y wget +RUN apt-get install -y net-tools +RUN apt-get install -y openjdk-7-jre +RUN apt-get install -y openjdk-7-jdk +RUN apt-get install -y openssh-server +RUN apt-get install -y vim +RUN apt-get install -y expect +RUN apt-get install -y daemontools +RUN mkdir -p /opt/odl_source/lithium +RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc' + + + +#Now lets got and fetch the ODL distribution +RUN echo "Fetching Lithium Rc0" +RUN wget https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/org/opendaylight/integration/distribution-karaf/0.3.0-SNAPSHOT/distribution-karaf-0.3.0-20150612.144348-2492.tar.gz -O /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz + +RUN echo "Untarring ODL inplace" +RUN mkdir -p /opt/odl/lithium +RUN tar zxvf /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz -C /opt/odl/lithium + +RUN echo "Installing DLUX and other features into ODL" +#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh +COPY container_scripts/start_odl_docker_container.sh /etc/init.d/ +COPY container_scripts/speak.sh /etc/init.d/ +#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh +RUN chmod 777 /etc/init.d/start_odl_docker_container.sh +RUN chmod 777 /etc/init.d/speak.sh + + + +# Expose the ports + +# PORTS FOR BASE SYSTEM AND DLUX +EXPOSE 8101 +EXPOSE 6633 +EXPOSE 1099 +EXPOSE 43506 +EXPOSE 8181 +EXPOSE 8185 +EXPOSE 9000 +EXPOSE 39378 +EXPOSE 33714 +EXPOSE 44444 +EXPOSE 6653 + +# PORTS FOR OVSDB AND ODL CONTROL +EXPOSE 12001 +EXPOSE 6640 +EXPOSE 8080 +EXPOSE 7800 +EXPOSE 55130 +EXPOSE 52150 +EXPOSE 36826 + +# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable +CMD ["/etc/init.d/start_odl_docker_container.sh"] diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh new file mode 100644 index 0000000..04d7b53 --- /dev/null +++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. 
+# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +#!/usr/bin/expect +spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client +expect "root>" +send "feature:list | grep -i odl-restconf\r" +send "\r\r\r" +expect "root>" +send "logout\r" + + diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh new file mode 100644 index 0000000..a7d0e6c --- /dev/null +++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh @@ -0,0 +1,20 @@ +#!/usr/bin/expect +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB +# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY +################################################################################# + +spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client +expect "root>" +send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" +send "\r\r\r" +expect "root>" +send "logout\r" diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh new file mode 100644 index 0000000..96a40ec --- /dev/null +++ b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh @@ -0,0 +1,48 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB +# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY +################################################################################# +# Start up script for calling karaf / ODL inside a docker container. +# +# This script will also call a couple expect scripts to load the feature set that we want + + +#ENV +export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 + +#MAIN +echo "Starting up the da Sheilds..." 
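+# Launch Karaf in server mode in the background; the feature set is then
+# installed by the speak.sh expect script invoked below. The fixed sleep is,
+# as the author notes, a timing hack rather than a real readiness check.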
+/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server & +echo "Sleeping 5 bad hack" +sleep 10 +echo "should see stuff listening now" +netstat -na +echo " should see proess running for karaf" +ps -efa +echo " Starting the packages we want" +/etc/init.d/speak.sh +echo "Printout the status - if its right, you should see 8181 appear now" +netstat -na +ps -efa + + + +## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute +## Cheap - but effective +while true; +do + echo "Checking status of ODL:" + /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status + sleep 60 +done + + diff --git a/fuel/build/Makefile b/fuel/build/Makefile index 5f63120..6c98ed9 100644 --- a/fuel/build/Makefile +++ b/fuel/build/Makefile @@ -43,6 +43,7 @@ SUBDIRS += f_l23network SUBDIRS += f_resolvconf SUBDIRS += f_ntp SUBDIRS += f_odl_docker +SUBDIRS += f_lith_odl_docker #SUBDIRS += f_odl # f_example is only an example of how to generate a .deb package and @@ -64,6 +65,7 @@ all: @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE) @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE) $(MAKE) -C f_odl_docker -f Makefile all + $(MAKE) -C f_lith_odl_docker -f Makefile all @make -C docker @docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso diff --git a/fuel/build/f_lith_odl_docker/Makefile b/fuel/build/f_lith_odl_docker/Makefile new file mode 100755 index 0000000..e89da94 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/Makefile @@ -0,0 +1,52 @@ +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# stefan.k.berg@ericsson.com +# jonas.bjurel@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +TOP := $(shell pwd) +BUILDTAG := robust_stefan +RELEASE := Lithium_rc0 + +# Edit this to match the GENESIS / OPNFV in your environment +export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv +include ../config.mk + +.PHONY: all +all: + @mkdir -p puppet/modules/opnfv/odl_docker/${RELEASE} + @rm -rf tmp + @mkdir -p tmp + @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile tmp/. + @docker build -t ${BUILDTAG} tmp/dockerfile/. + @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar + @wget ${DOCKER_REPO}/${DOCKER_TAG} -O puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest + @echo "OPFNV_PUPPET is: ${OPNFV_PUPPET}" + @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile/container_scripts puppet/modules/opnfv/odl_docker/${RELEASE}/. + +.PHONY: clean +clean: + @rm -rf tmp + @rm -rf release + +.PHONY: build-clean +build-clean: + @rm -rf tmp + @rm -rf release + @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar + @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest + +.PHONY: validate-cache +validate-cache: + @echo "No cache validation schema available for $(shell pwd)" + @echo "Continuing ..." 
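+# NOTE: the release target below publishes the build output - the
+# odl_docker.pp manifest from common puppet-opnfv plus the local puppet
+# modules (including the saved container image) are copied into PUPPET_DEST
+# for the ISO build.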
+ +.PHONY: release +release: + # Fetch PP from OPNFV Common + @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST} + @cp -Rvp puppet/modules/* $(PUPPET_DEST) diff --git a/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile new file mode 100755 index 0000000..e3c7ee5 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile @@ -0,0 +1,72 @@ +#################################################################### +# +# Dockerfile to build a ODL (Karaf) Docker Container +# +# Copyright daniel.smith@ericsson.com +# License: Apache GPL +# +#################################################################### + + +#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff +FROM ubuntu:12.04 + +# Maintainer Info +MAINTAINER Daniel Smith + +#Run apt-get update one start just to check for updates when building +RUN echo "Updating APT" +RUN apt-get update +RUN echo "Adding wget" +RUN apt-get install -y wget +RUN apt-get install -y net-tools +RUN apt-get install -y openjdk-7-jre +RUN apt-get install -y openjdk-7-jdk +RUN apt-get install -y openssh-server +RUN apt-get install -y vim +RUN apt-get install -y expect +RUN apt-get install -y daemontools +RUN mkdir -p /opt/odl_source +RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc' + + +#Now lets got and fetch the ODL distribution +RUN echo "Fetching ODL" +RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz + +RUN echo "Untarring ODL inplace" +RUN mkdir -p /opt/odl +RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl + +RUN echo "Installing DLUX and other features into ODL" +COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh +COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh +RUN chmod 777 /etc/init.d/start_odl_docker.sh +RUN chmod 777 /etc/init.d/speak.sh + + +# Expose the ports +# PORTS FOR BASE SYSTEM AND DLUX +EXPOSE 8101 +EXPOSE 6633 +EXPOSE 1099 +EXPOSE 43506 +EXPOSE 8181 +EXPOSE 8185 +EXPOSE 9000 +EXPOSE 39378 +EXPOSE 33714 +EXPOSE 44444 +EXPOSE 6653 + +# PORTS FOR OVSDB AND ODL CONTROL +EXPOSE 12001 +EXPOSE 6640 +EXPOSE 8080 +EXPOSE 7800 +EXPOSE 55130 +EXPOSE 52150 +EXPOSE 36826 + +# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable +CMD ["/etc/init.d/start_odl_docker.sh"] diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh new file mode 100755 index 0000000..3e5d0b2 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh @@ -0,0 +1,8 @@ +#!/usr/bin/expect +spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client +expect "root>" +send "feature:list | grep -i odl-restconf\r" +send "\r\r\r" +expect "root>" +send "logout\r" + diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh new file mode 100755 index 0000000..3ba07a8 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh @@ -0,0 +1,17 @@ +#!/usr/bin/expect +# Ericsson Research Canada +# +# Author: Daniel Smith +# +# 
Simple expect script to start up ODL client and load feature set for DLUX and OVSDB +# +# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY +# DEPRECATED AFTER ARNO + +spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client +expect "root>" +send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" +send "\r\r\r" +expect "root>" +send "logout\r" + diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh new file mode 100755 index 0000000..1c72dda --- /dev/null +++ b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Ericsson Research Canada +# +# Author: Daniel Smith +# +# Start up script for calling karaf / ODL inside a docker container. +# +# This script will also call a couple expect scripts to load the feature set that we want + + +#ENV +export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 + +#MAIN +echo "Starting up the da Sheilds..." +/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server & +echo "Sleeping 5 bad hack" +sleep 10 +echo "should see stuff listening now" +netstat -na +echo " should see proess running for karaf" +ps -efa +echo " Starting the packages we want" +/etc/init.d/speak.sh +echo "Printout the status - if its right, you should see 8181 appear now" +netstat -na +ps -efa + + + +## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute +## Cheap - but effective +while true; +do + echo "Checking status of ODL:" + /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status + sleep 60 +done diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp new file mode 100644 index 0000000..cd243ef --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp @@ -0,0 +1,81 @@ +class opnfv::odl_lith_docker +{ + case $::fuel_settings['role'] { + /controller/: { + + file { '/opt': + ensure => 'directory', + } + + file { '/opt/opnfv': + ensure => 'directory', + owner => 'root', + group => 'root', + mode => 777, + } + + file { '/opt/opnfv/odl': + ensure => 'directory', + } + + file { '/opt/opnfv/odl/lithium': + ensure => 'directory', + } + + file { '/opt/opnfv/odl/lithium/odl_docker_image.tar': + ensure => present, + source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/odl_docker_image.tar', + mode => 750, + } + + file { '/opt/opnfv/odl/lithium/docker-latest': + ensure => present, + source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/docker-latest', + mode => 750, + } + + file { '/opt/opnfv/odl/start_odl_conatiner.sh': + ensure => present, + source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh', + mode => 750, + } + file { '/opt/opnfv/odl/stage_odl.sh': + ensure => present, + source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh', + mode => 750, + } + file { '/opt/opnfv/odl/config_net_odl.sh': + ensure => present, + source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh', + mode => 750, + } + file { '/opt/opnfv/odl/change.sh': + ensure => present, + source => '/etc/puppet/modules/opnfv/scripts/change.sh', + mode => 750, + } + + + # fix failed to find the cgroup root issue + # https://github.com/docker/docker/issues/8791 + case 
$::operatingsystem { + 'ubuntu': { + package {'cgroup-lite': + ensure => present, + } + + service {'cgroup-lite': + ensure => running, + enable => true, + require => Package['cgroup-lite'], + } + } + 'centos': { + package {'docker-io': + ensure => latest, + } + } + } + } + } +} diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh new file mode 100644 index 0000000..04d7b53 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh @@ -0,0 +1,18 @@ +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +#!/usr/bin/expect +spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client +expect "root>" +send "feature:list | grep -i odl-restconf\r" +send "\r\r\r" +expect "root>" +send "logout\r" + + diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh new file mode 100644 index 0000000..a7d0e6c --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh @@ -0,0 +1,20 @@ +#!/usr/bin/expect +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB +# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY +################################################################################# + +spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client +expect "root>" +send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" +send "\r\r\r" +expect "root>" +send "logout\r" diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh new file mode 100644 index 0000000..96a40ec --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh @@ -0,0 +1,48 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# daniel.smith@ericsson.com +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +# +# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB +# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY +################################################################################# +# Start up script for calling karaf / ODL inside a docker container. +# +# This script will also call a couple expect scripts to load the feature set that we want + + +#ENV +export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 + +#MAIN +echo "Starting up the da Sheilds..." +/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server & +echo "Sleeping 5 bad hack" +sleep 10 +echo "should see stuff listening now" +netstat -na +echo " should see proess running for karaf" +ps -efa +echo " Starting the packages we want" +/etc/init.d/speak.sh +echo "Printout the status - if its right, you should see 8181 appear now" +netstat -na +ps -efa + + + +## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute +## Cheap - but effective +while true; +do + echo "Checking status of ODL:" + /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status + sleep 60 +done + + diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh new file mode 100644 index 0000000..f7f3d6e --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh @@ -0,0 +1,219 @@ +#!/bin/bash +# script to remove bridges and reset networking for ODL + + +#VARS +MODE=0 +DNS=8.8.8.8 + +#ENV +source ~/openrc + +# GET IPS for that node +function get_ips { + BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'` + BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'` + BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'` + BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'` + DEF_NETMASK=255.255.255.0 + DEF_GW=172.30.9.1 +} + +function backup_ifcfg { + echo " backing up " + mkdir -p /etc/network/ifcfg_backup + mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/. + mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/. 
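+ # Remove any leftover per-VLAN configs so the create_ifcfg_* functions
+ # below can regenerate eth0/eth1 networking from scratch.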
+ rm -rf /etc/network/interfaces.d/ifcfg-eth1.300 + rm -rf /etc/network/interfaces.d/ifcfg-eth1.301 + rm -rf /etc/network/interfaces.d/ifcfg-eth1 + rm -rf /etc/network/interfaces.d/ifcfg-eth0 + +} + + +function create_ifcfg_br_mgmt { + echo "migrating br_mgmt" + echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300 + echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300 + echo " address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300 + echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300 +} + +function create_ifcfg_br_storage { + echo "migration br_storage" + echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301 + echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301 + echo " address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301 + echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301 +} + +function create_ifcfg_br_fw_admin { + echo " migratinng br_fw_admin" + echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1 + echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1 + echo " address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1 + echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1 +} + +function create_ifcfg_eth0 { + echo "migratinng br-ex to eth0 - temporarily" + echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0 + echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0 + echo " address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0 + echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0 + echo " gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0 +} + +function set_mode { + if [ -d "/var/lib/glance/images" ] + then + echo " controller " + MODE=0 + else + echo " compute " + MODE=1 + fi +} + + +function stop_ovs { + echo "Stopping OpenVSwitch" + service openvswitch-switch stop + +} + +function start_ovs { + echo "Starting OVS" + service openvswitch-switch start + ovs-vsctl show +} + + +function clean_ovs { + echo "cleaning OVS DB" + stop_ovs + rm -rf /var/log/openvswitch/* + mkdir -p /opt/opnfv/odl/ovs_back + cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/. 
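+ # Removing conf.db makes ovsdb-server recreate an empty database on the
+ # next start, i.e. the "Nothing there" the echo below expects.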
+ rm -rf /etc/openvswitch/conf.db + echo "restarting OVS - you should see Nothing there" + start_ovs +} + + + +function reboot_me { + reboot +} + +function allow_challenge { + sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config + service ssh restart +} + +function clean_neutron { + subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) + networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) + ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) + routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) + + #display all elements + echo "SUBNETS: ${subnets[@]} " + echo "NETWORKS: ${networks[@]} " + echo "PORTS: ${ports[@]} " + echo "ROUTERS: ${routers[@]} " + + + # get port and subnet for each router + for i in "${routers[@]}" + do + routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id | sed '/^$/d' `) + routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed | sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//' -e 's/"$//' `) + done + + echo "ROUTER PORTS: ${routerport[@]} " + echo "ROUTER SUBNET: ${routersnet[@]} " + + #remove router subnets + echo "router-interface-delete" + for i in "${routersnet[@]}" + do + neutron router-interface-delete ${routers[0]} $i + done + + #remove subnets + echo "subnet-delete" + for i in "${subnets[@]}" + do + neutron subnet-delete $i + done + + #remove nets + echo "net-delete" + for i in "${networks[@]}" + do + neutron net-delete $i + done + + #remove routers + echo "router-delete" + for i in "${routers[@]}" + do + neutron router-delete $i + done + + #remove ports + echo "port-delete" + for i in "${ports[@]}" + do + neutron port-delete $i + done + + #remove subnets + echo "subnet-delete second pass" + for i in "${subnets[@]}" + do + neutron subnet-delete $i + done + +} + +function set_dns { + sed -i -e 's/nameserver 10.20.0.2/nameserver $DNS/g' /etc/resolv.conf +} + + +#OUTPUT + +function check { + echo $BR_MGMT + echo $BR_STORAGE + echo $BR_FW_ADMIN + echo $BR_EX +} + +### MAIN + + +set_mode +backup_ifcfg +get_ips +create_ifcfg_br_mgmt +create_ifcfg_br_storage +create_ifcfg_br_fw_admin +if [ $MODE == "0" ] +then + create_ifcfg_eth0 +fi +allow_challenge +clean_ovs +check +reboot_me + + diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh new file mode 100755 index 0000000..145da80 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh @@ -0,0 +1,192 @@ +#!/bin/bash +# +# Author: Daniel Smith (Ericsson) +# +# Script to update neutron configuration for OVSDB/ODL integratino +# +# Usage - Set / pass CONTROL_HOST to your needs +# +### SET THIS VALUE TO MATCH YOUR SYSTEM +CONTROL_HOST=192.168.0.2 +BR_EX_IP=172.30.9.70 + +# ENV +source ~/openrc +# VARS +ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini +MODE=0 + + +# FUNCTIONS +# Update ml2_conf.ini +function update_ml2conf { + echo "Backing up and modifying ml2_conf.ini" + cp $ML2_CONF $ML2_CONF.bak + sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF + sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF + sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF + echo "[ml2_odl]" >> $ML2_CONF + echo "password = admin" >> $ML2_CONF 
+ echo "username = admin" >> $ML2_CONF + echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF +} + +function reset_neutrondb { + echo "Reseting DB" + mysql -e "drop database if exists neutron_ml2;" + mysql -e "create database neutron_ml2 character set utf8;" + mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';" + neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head +} + +function restart_neutron { + echo "Restarting Neutron Server" + service neutron-server restart + echo "Should see Neutron runing now" + service neutron-server status + echo "Shouldnt be any nets, but should work (return empty)" + neutron net-list +} + +function stop_neutron { + echo "Stopping Neutron / OVS components" + service neutron-plugin-openvswitch-agent stop + if [ $MODE == "0" ] + then + service neutron-server stop + fi +} + +function disable_agent { + echo "Disabling Neutron Plugin Agents from running" + service neutron-plugin-openvswitch-agent stop + echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override +} + + + +function verify_ML2_working { + echo "checking that we can talk via ML2 properly" + curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2 + if grep "network" /tmp/check_ml2 + then + echo "Success - ML2 to ODL is working" + else + echo "im sorry Jim, but its dead" + fi + +} + + +function set_mode { + if [ -d "/var/lib/glance/images" ] + then + echo "Controller Mode" + MODE=0 + else + echo "Compute Mode" + MODE=1 + fi +} + +function stop_ovs { + echo "Stopping OpenVSwitch" + service openvswitch-switch stop + +} + +function start_ovs { + echo "Starting OVS" + service openvswitch-vswitch start + ovs-vsctl show +} + + +function control_setup { + echo "Modifying Controller" + stop_neutron + stop_ovs + disable_agent + rm -rf /var/log/openvswitch/* + mkdir -p /opt/opnfv/odl/ovs_back + mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/. + mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/. + rm -rf /etc/openvswitch/conf.db + rm -rf /etc/openvswitch/.conf* + service openvswitch-switch start + ovs-vsctl add-br br-ex + ovs-vsctl add-port br-ex eth0 + ovs-vsctl set interface br-ex type=external + ifconfig br-ex 172.30.9.70/24 up + service neutron-server restart + + echo "setting up networks" + ip link add link eth1 name br-mgmt type vlan id 300 + ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp + ip link add link eth1 name br-storage type vlan id 301 + ip link add link eth1 name br-prv type vlan id 1000 + ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp + ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp + + echo "Setting ODL Manager IP" + ovs-vsctl set-manager tcp:192.168.0.2:6640 + + echo "Verifying ODL ML2 plugin is working" + verify_ML2_working + + # BAD HACK - Should be parameterized - this is to catch up + route add default gw 172.30.9.1 + +} + +function clean_ovs { + echo "cleaning OVS DB" + stop_ovs + rm -rf /var/log/openvswitch/* + mkdir -p /opt/opnfv/odl/ovs_back + cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/. 
+ rm -rf /etc/openvswitch/conf.db + echo "restarting OVS - you should see Nothing there" + start_ovs +} + +function compute_setup { + echo "Modifying Compute" + echo "Disabling neutron openvswitch plugin" + stop_neutron + disable_agent + ip link add link eth1 name br-mgmt type vlan id 300 + ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp + ip link add link eth1 name br-storage type vlan id 301 + ip link add link eth1 name br-prv type vlan id 1000 + ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp + ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp + + echo "set manager, and route for ODL controller" + ovs-vsctl set-manager tcp:192.168.0.2:6640 + route add 172.17.0.1 gw 192.168.0.2 + verify_ML2_working +} + + +# MAIN +echo "Starting to make call" +update_ml2conf +echo "Check Mode" +set_mode + +if [ $MODE == "0" ]; +then + echo "Calling control setup" + control_setup +elif [ $MODE == "1" ]; +then + echo "Calling compute setup" + compute_setup + +else + echo "Something is bad - call for help" + exit +fi + + diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh new file mode 100755 index 0000000..fa14b47 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# Author: Daniel Smith (Ericsson) +# Stages ODL Controlleer +# Inputs: odl_docker_image.tar +# Usage: ./stage_odl.sh + +# ENVS +source ~/.bashrc +source ~/openrc + +LOCALPATH=/opt/opnfv/odl +DOCKERBIN=docker-latest +ODLIMGNAME=odl_docker_image.tar +DNS=8.8.8.8 +HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'` + + + +# DEBUG ECHOS +echo $LOCALPATH +echo $DOCKERBIN +echo $ODLIMGNAME +echo $DNS +echo $HOST_IP + + +# Set DNS to someting external and default GW - ODL requires a connection to the internet +sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf +route delete default gw 10.20.0.2 +route add default gw 172.30.9.1 + +# Start Docker daemon and in background +echo "Starting Docker" +chmod +x $LOCALPATH/$DOCKERBIN +$LOCALPATH/$DOCKERBIN -d & +#courtesy sleep for virtual env +sleep 2 + +# Import the ODL Container +echo "Importing ODL Container" +$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME + +# Start ODL, load DLUX and OVSDB modules +echo "Removing any old install found - file not found is ok here" +$LOCALPATH/$DOCKERBIN rm odl_docker +echo "Starting up ODL controller in Daemon mode - no shell possible" +$LOCALPATH/$DOCKERBIN run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel + +# Following, you should see the docker ps listed and a port opened +echo " you should reach ODL controller at http://HOST_IP:8181/dlux/index.html" +$LOCALPATH/$DOCKERBINNAME ps -a +netstat -lnt + + diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh new file mode 100755 index 0000000..347ac74 --- /dev/null +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh @@ -0,0 
+1,95 @@ +#!/bin/bash +# Ericsson Canada Inc. +# Authoer: Daniel Smith +# +# A helper script to install and setup the ODL docker conatiner on the controller +# +# +# Inputs: odl_docker_image.tar +# +# Usage: ./start_odl_docker.sh +echo "DEPRECATED - USE stage_odl.sh instead - this will be removed shortly once automated deployment is working - SR1" + + +# ENVS +source ~/.bashrc +source ~/openrc + +# VARS + +# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source + +DEV=1 + +# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host, if set to 0, then random ports will be used - NOTE: this doesnt work for all web services X port on Host --> Y port in Container, +# especially for SSL/HTTPS cases. Be aware. + +MATCH_PORT=1 + +LOCALPATH=/opt/opnfv/odl +DOCKERBINNAME=docker-latest +DOCKERIMAGENAME=odl_docker_image.tar +DNS=8.8.8.8 +HOST_IP=`ifconfig br-fw-admin | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'` + + +# Set this to "1" if you want to have your docker container startup into a shell + + +ENABLE_SHELL=1 + + +echo " Fetching Docker " +if [ "$DEV" -eq "1" ]; +# If testing Locally (on a control node) you can set DEV=1 to enable apt-get based install on the control node (not desired target, but good for testing). +then + echo "Dev Mode - Fetching from Internet"; + echo " this wont work in production builds"; + apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + mkdir -p $LOCALPATH + wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME + wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb + chmod 777 $LOCALPATH/$DOCKERBINNAME + echo "done "; +else + echo "Using Binaries delivered from Puppet" + echo "Starting Docker in Daemon mode" + chmod +x $LOCALPATH/$DOCKERBINNAME + $LOCALPATH/$DOCKERBINNAME -d & + + # wait until docker will be fully initialized + # before any further action against just started docker + sleep 5 +fi + + +# We need to perform some cleanup of the Openstack Environment +echo "TODO -- This should be automated in the Fuel deployment at some point" +echo "However, the timing should come after basic tests are running, since this " +echo " part will remove the subnet router association that is deployed automativally" +echo " via fuel. Refer to the ODL + Openstack Integration Page " + +# Import the ODL container into docker + +echo "Importing ODL container into docker" +$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME + +echo " starting up ODL - DLUX and Mapping Ports" +if [ "$MATCH_PORT" -eq "1" ] +then + echo "Starting up Docker..." 
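+ # Remove any stale container of the same name first so the run below can
+ # re-bind the 1:1 host:container port mappings; a failure here on a first
+ # run (no such container) is harmless.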
+ $LOCALPATH/$DOCKERBINNAME rm odl_docker +fi + +if [ "$ENABLE_SHELL" -eq "1" ]; +then + echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)" + $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel /bin/bash +else + echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)" + $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel + echo "should see the process listed here in docker ps -a" + $LOCALPATH/$DOCKERBINNAME ps -a; + echo "Match Port enabled, you can reach the DLUX login at: " + echo "http://$HOST_IP:8181/dlux.index.html" +fi diff --git a/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh new file mode 100644 index 0000000..d292acd --- /dev/null +++ b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# +# Author: Daniel Smith (Ericsson) +# +# Script to update neutron configuration for OVSDB/ODL integratino +# +# Usage - Set / pass CONTROL_HOST to your needs +# +CONTROL_HOST=172.30.9.70 + +# ENV +source ~/openrc + +# VARS +ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini +MODE=0 + + +# FUNCTIONS + +# Update ml2_conf.ini +function update_ml2conf { + echo "Backing up and modifying ml2_conf.ini" + cp $ML2_CONF $ML2_CONF.bak + sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF + sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF + cat "[ml2_odl]" >> $ML2_CONF + cat "password = admin" >> $ML2_CONF + cat "username = admin" >> $ML2_CONF + cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF +} + +function reset_neutrondb { + echo "Reseting DB" + mysql -e "drop database if exists neutron_ml2;" + mysql -e "create database neutron_ml2 character set utf8;" + mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';" + neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head +} + +function restart_neutron { + echo "Restarting Neutron Server" + service neutron-server restart + echo "Should see Neutron runing now" + service neutron-server status + echo "Shouldnt be any nets, but should work (return empty)" + neutron net-list +} + +function stop_neutron { + echo "Stopping Neutron / OVS components" + service neutron-plugin-openvswitch-agent stop + if [ $MODE == "0" ] + then + service neutron-server stop + fi +} + + + +function verify_ML2_working { + echo "checking that we can talk via ML2 properly" + curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2 + if grep "network" /tmp/check_ml2 + then + echo "Success - ML2 to ODL is working" + else + echo "im sorry Jim, but its dead" + fi + +} + + +function set_mode { + if ls -l /var/lib/glance/images + then + echo "Controller Mode" + MODE=0 + else + echo "Compute Mode" + MODE=1 + fi +} + +function stop_ovs { + echo "Stopping 
OpenVSwitch" + service openvswitch-switch stop + +} + +function control_setup { + echo "Modifying Controller" + stop_neutron + stop_ovs + rm -rf /var/log/openvswitch/* + mkdir -p /opt/opnfv/odl/ovs_back + mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/. + mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/. + service openvswitch-switch start + ovs-vsctl set-manager tcp:172.30.9.70:6640 + ovs-vsctl add-br br-eth0 + ovs-vsctl add-br br-ex + ovs-vsctl add-port br-eth0 eth0 + ovs-vsctl add-port br-eth0 br-eth0--br-ex + ovs-vsctl add-port br-ex br-ex--br-eth0 + ovs-vsctl set interface br-ex--br-eth0 type=patch + ovs-vsctl set interface br-eth0--br-ex type=patch + ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex + ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0 + ifconfig br-ex 172.30.9.70/24 up + service neutron-server restart + + echo "setting up networks" + ip link add link eth1 name br-mgmt type vlan id 300 + ip link add link eth1 name br-storage type vlan id 301 + /etc/init.d/networking restart + + + echo "Reset Neutron DB" + #reset_neutrondb + echo "Restarting Neutron Components" + #restart_neutron + echo "Verifying ODL ML2 plugin is working" + verify_ML2_working + +} + +function compute_setup { + echo "do compute stuff here" + echo "stopping neutron openvswitch plugin" + stop_neutron + ip link add link eth1 name br-mgmt type vlan id 300 + ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 + ip link add link eth1 name br-storage type vlan id 301 + ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 + ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24 + echo "set manager, and route for ODL controller" + ovs-vsctl set-manager tcp:192.168.0.2:6640 + route add 172.17.0.1 gw 192.168.0.2 + verify_ML2_working +} + + +# MAIN +echo "Starting to make call" +update_ml2conf +echo "Check Mode" +set_mode + +if [ $MODE == "0" ]; +then + echo "Calling control setup" + control_setup +elif [ $MODE == "1" ]; +then + echo "Calling compute setup" + compute_setup + +else + echo "Something is bad - call for help" + exit +fi + + diff --git a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh new file mode 100644 index 0000000..3b688ae --- /dev/null +++ b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh @@ -0,0 +1,146 @@ +#!/bin/bash +CONTROL_HOST=172.17.0.3 + +# ENV +source ~/openrc + + + +# VARS +ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini +MODE=0 + + +# FUCNTIONS + + +# Update ml2_conf.ini +function update_ml2conf { + echo "Backing up and modifying ml2_conf.ini" + cp $ML2_CONF $ML2_CONF.bak + sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF +#!/bin/bash +CONTROL_HOST=172.17.0.3 + +# ENV +source ~/openrc + + + +# VARS +ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini +MODE=0 + + +# FUCNTIONS + + +# Update ml2_conf.ini +function update_ml2conf { + echo "Backing up and modifying ml2_conf.ini" + cp $ML2_CONF $ML2_CONF.bak + sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF + sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF + cat "[ml2_odl]" >> $ML2_CONF + cat "password = admin" >> $ML2_CONF + cat "username = admin" >> $ML2_CONF + cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> 
diff --git a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
new file mode 100644
index 0000000..3b688ae
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
@@ -0,0 +1,146 @@
+#!/bin/bash
+CONTROL_HOST=172.17.0.3
+
+# ENV
+source ~/openrc
+
+
+
+# VARS
+ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
+MODE=0
+
+
+# FUNCTIONS
+
+
+# Update ml2_conf.ini
+function update_ml2conf {
+    echo "Backing up and modifying ml2_conf.ini"
+    cp $ML2_CONF $ML2_CONF.bak
+    sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF
+    sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF
+    echo "[ml2_odl]" >> $ML2_CONF
+    echo "password = admin" >> $ML2_CONF
+    echo "username = admin" >> $ML2_CONF
+    echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF
+}
+
+function reset_neutrondb {
+    echo "Resetting DB"
+    mysql -e "drop database if exists neutron_ml2;"
+    mysql -e "create database neutron_ml2 character set utf8;"
+    mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';"
+    neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
+}
+
+function restart_neutron {
+    echo "Restarting Neutron Server"
+    service neutron-server restart
+    echo "Should see Neutron running now"
+    service neutron-server status
+    echo "Shouldn't be any nets yet, but the command should work (return empty)"
+    neutron net-list
+}
+
+function stop_neutron {
+    echo "Stopping Neutron / OVS components"
+    service neutron-plugin-openvswitch-agent stop
+    if [ $MODE == "0" ]
+    then
+        service neutron-server stop
+    fi
+}
+
+
+
+function verify_ML2_working {
+    echo "Checking that we can talk via ML2 properly"
+    curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2
+    if grep "network" /tmp/check_ml2
+    then
+        echo "Success - ML2 to ODL is working"
+    else
+        echo "I'm sorry Jim, but it's dead"
+    fi
+
+}
+
+
+function set_mode {
+    if df -k | grep glance
+    then
+        echo "Controller Mode"
+        MODE=0
+    else
+        echo "Compute Mode"
+        MODE=1
+    fi
+}
+
+function stop_ovs {
+    echo "Stopping OpenVSwitch"
+    service openvswitch-switch stop
+
+}
+
+function control_setup {
+    echo "Configuring controller node"
+    echo "Reset Neutron DB"
+    #reset_neutrondb
+    echo "Restarting Neutron Components"
+    #restart_neutron
+    echo "Verifying ODL ML2 plugin is working"
+    verify_ML2_working
+
+}
+
+function compute_setup {
+    echo "Configuring compute node"
+    stop_neutron
+    verify_ML2_working
+}
+
+
+# MAIN
+echo "Starting to make call"
+#update_ml2conf
+echo "Check Mode"
+set_mode
+
+if [ $MODE == "0" ];
+then
+    echo "Calling control setup"
+    control_setup
+elif [ $MODE == "1" ];
+then
+    echo "Calling compute setup"
+    compute_setup
+
+else
+    echo "Something is bad - call for help"
+    exit
+fi
+
+
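Both configuration scripts above build the [ml2_odl] section with one append per line; an equivalent single heredoc keeps the section together and uses one redirect. A sketch producing the same four lines (CONTROL_HOST as set at the top of either script):

    cat >> /etc/neutron/plugins/ml2/ml2_conf.ini <<EOF
    [ml2_odl]
    password = admin
    username = admin
    url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron
    EOF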
diff --git a/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
new file mode 100755
index 0000000..dd4fc9f
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# A "cheat" way to install docker on the controller.
+# Can only be used if you have a connection out to the internet.
+
+# Usage: ./prep_nets_for_odl.sh <old default gw> <new default gw>
+
+OLDGW=$1
+NEWGW=$2
+IMAGEPATH=/opt/opnfv
+IMAGENAME=odl_docker_image.tar
+SOURCES=/etc/apt/sources.list
+
+
+if [ "$#" -ne 2 ]; then
+    echo "Two args not provided, will not touch networking"
+else
+
+    # Fix routes
+    echo "Fixing routes"
+    #DEBUG
+    netstat -rn
+
+    echo "Deleting old default route"
+    route delete default gw $1
+    echo "Adding new default route"
+    route add default gw $2
+
+    echo "You should see a good nslookup now"
+    nslookup www.google.ca
+fi
+
+
+if egrep "mirrors.txt" $SOURCES
+then
+    echo "Sources was already updated, not touching"
+else
+    echo "Adding the closest mirrors and the docker mirror to the mix"
+    echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
+    echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
+    echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
+    echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
+    apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+    echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
+fi
+
+echo "Updating"
+apt-get update
+echo "Installing Docker"
+apt-get install -y lxc-docker
+
+echo "Loading ODL Docker Image"
+docker load -i $IMAGEPATH/$IMAGENAME
+
+
diff --git a/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
new file mode 100644
index 0000000..42c9451
--- /dev/null
+++ b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# OK... so Fuel created br-int; let's add a physical NIC to it.
+#
+# First - remove all the bridges you find, except br-int and br-prv.
+
+for i in $(ovs-vsctl list-br)
+do
+    if [ "$i" == "br-int" ];
+    then
+        echo "skipped br-int"
+    elif [ "$i" == "br-prv" ];
+    then
+        echo "skipped br-prv"
+    else
+        ovs-vsctl del-br $i
+    fi
+done
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
index 54f1c86..436f496 100644
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
+++ b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
@@ -25,4 +25,6 @@ class opnfv {
   include opnfv::add_packages
   # Setup OpenDaylight
   include opnfv::odl_docker
+  # Setup OpenDaylight (Lithium) in Docker
+  include opnfv::odl_lith_docker
 }
--
cgit 1.2.3-korg


From 12019717d3be5b4cfa42751cd19cbd42f82bc04a Mon Sep 17 00:00:00 2001
From: baigk
Date: Thu, 11 Jun 2015 14:13:24 +0800
Subject: Add openstack HA installer code with ansible for compass adapter

patch 2: add service monitor and adjust the monitoring interval

BGS-25

Change-Id: I4239576b3888a0a7f6b697fe7fffffb677ef7dab
Signed-off-by: baigk
---
 .../openstack_juno/HA-ansible-multinodes.yml | 42 +
 compass/deploy/ansible/openstack_juno/allinone.yml | 38 +
 compass/deploy/ansible/openstack_juno/compute.yml | 9 +
 .../deploy/ansible/openstack_juno/controller.yml | 15 +
 .../deploy/ansible/openstack_juno/group_vars/all | 54 +
 .../deploy/ansible/openstack_juno/multinodes.yml | 75 ++
 compass/deploy/ansible/openstack_juno/network.yml | 8 +
 .../roles/cinder-controller/handlers/main.yml | 6 +
 .../cinder-controller/tasks/cinder_config.yml | 20 +
 .../cinder-controller/tasks/cinder_install.yml | 20 +
 .../roles/cinder-controller/tasks/main.yml | 13 +
 .../cinder-controller/templates/api-paste.ini | 71 ++
 .../roles/cinder-controller/templates/cinder.conf | 63 +
 .../cinder-controller/templates/cinder_init.sh | 6 +
 .../roles/cinder-volume/files/loop.yml | 1 +
 .../roles/cinder-volume/handlers/main.yml | 4 +
 .../roles/cinder-volume/tasks/main.yml | 55 +
 .../roles/cinder-volume/templates/cinder.conf | 62 +
.../files/sources.list.d/cloudarchive-juno.list | 1 + .../openstack_juno/roles/common/tasks/main.yml | 28 + .../openstack_juno/roles/common/templates/hosts | 22 + .../openstack_juno/roles/common/templates/ntp.conf | 56 + .../openstack_juno/roles/dashboard/tasks/main.yml | 30 + .../roles/dashboard/templates/local_settings.py | 511 ++++++++ .../dashboard/templates/openstack-dashboard.conf | 14 + .../openstack_juno/roles/database/files/my.cnf | 131 ++ .../openstack_juno/roles/database/tasks/main.yml | 12 + .../roles/database/tasks/mariadb.yml | 61 + .../openstack_juno/roles/database/tasks/mysql.yml | 22 + .../roles/database/templates/data.j2 | 39 + .../openstack_juno/roles/database/templates/my.cnf | 134 ++ .../roles/database/templates/wsrep.cnf | 126 ++ .../openstack_juno/roles/glance/handlers/main.yml | 6 + .../roles/glance/tasks/glance_config.yml | 29 + .../roles/glance/tasks/glance_install.yml | 26 + .../openstack_juno/roles/glance/tasks/main.yml | 18 + .../openstack_juno/roles/glance/tasks/nfs.yml | 41 + .../roles/glance/templates/glance-api.conf | 677 ++++++++++ .../roles/glance/templates/glance-registry.conf | 190 +++ .../roles/glance/templates/image_upload.sh | 2 + .../openstack_juno/roles/ha/files/galera_chk | 10 + .../ansible/openstack_juno/roles/ha/files/mysqlchk | 15 + .../openstack_juno/roles/ha/files/notify.sh | 4 + .../openstack_juno/roles/ha/handlers/main.yml | 9 + .../ansible/openstack_juno/roles/ha/tasks/main.yml | 94 ++ .../openstack_juno/roles/ha/templates/failover.j2 | 65 + .../openstack_juno/roles/ha/templates/haproxy.cfg | 133 ++ .../roles/ha/templates/keepalived.conf | 42 + .../roles/keystone/handlers/main.yml | 3 + .../roles/keystone/tasks/keystone_config.yml | 16 + .../roles/keystone/tasks/keystone_install.yml | 27 + .../openstack_juno/roles/keystone/tasks/main.yml | 13 + .../roles/keystone/templates/admin-openrc.sh | 6 + .../roles/keystone/templates/demo-openrc.sh | 5 + .../roles/keystone/templates/keystone.conf | 1317 ++++++++++++++++++++ .../roles/keystone/templates/keystone_init | 43 + .../roles/monitor/files/check_service.sh | 7 + .../openstack_juno/roles/monitor/files/root | 1 + .../openstack_juno/roles/monitor/tasks/main.yml | 11 + .../ansible/openstack_juno/roles/mq/tasks/main.yml | 5 + .../openstack_juno/roles/mq/tasks/rabbitmq.yml | 45 + .../roles/mq/tasks/rabbitmq_cluster.yml | 27 + .../roles/mq/templates/.erlang.cookie | 1 + .../roles/mq/templates/rabbitmq-env.conf | 1 + .../roles/neutron-common/handlers/main.yml | 13 + .../roles/neutron-compute/defaults/main.yml | 2 + .../roles/neutron-compute/handlers/main.yml | 13 + .../roles/neutron-compute/tasks/main.yml | 55 + .../roles/neutron-compute/templates/dhcp_agent.ini | 90 ++ .../neutron-compute/templates/dnsmasq-neutron.conf | 2 + .../neutron-compute/templates/etc/xorp/config.boot | 25 + .../roles/neutron-compute/templates/l3_agent.ini | 81 ++ .../neutron-compute/templates/metadata_agent.ini | 46 + .../roles/neutron-compute/templates/ml2_conf.ini | 108 ++ .../neutron-compute/templates/neutron-network.conf | 465 +++++++ .../roles/neutron-compute/templates/neutron.conf | 466 +++++++ .../neutron-compute/templates/neutron_init.sh | 4 + .../roles/neutron-compute/templates/nova.conf | 73 ++ .../roles/neutron-controller/handlers/main.yml | 24 + .../roles/neutron-controller/tasks/main.yml | 13 + .../neutron-controller/tasks/neutron_config.yml | 10 + .../neutron-controller/tasks/neutron_install.yml | 29 + .../neutron-controller/templates/dhcp_agent.ini | 90 ++ .../templates/dnsmasq-neutron.conf | 2 + 
.../templates/etc/xorp/config.boot | 25 + .../neutron-controller/templates/l3_agent.ini | 81 ++ .../templates/metadata_agent.ini | 46 + .../neutron-controller/templates/ml2_conf.ini | 108 ++ .../templates/neutron-network.conf | 465 +++++++ .../neutron-controller/templates/neutron.conf | 466 +++++++ .../neutron-controller/templates/neutron_init.sh | 4 + .../roles/neutron-controller/templates/nova.conf | 69 + .../roles/neutron-network/handlers/main.yml | 21 + .../roles/neutron-network/tasks/igmp-router.yml | 20 + .../roles/neutron-network/tasks/main.yml | 114 ++ .../roles/neutron-network/tasks/odl.yml | 13 + .../roles/neutron-network/templates/dhcp_agent.ini | 90 ++ .../neutron-network/templates/dnsmasq-neutron.conf | 2 + .../neutron-network/templates/etc/xorp/config.boot | 25 + .../roles/neutron-network/templates/l3_agent.ini | 81 ++ .../neutron-network/templates/metadata_agent.ini | 46 + .../roles/neutron-network/templates/ml2_conf.ini | 108 ++ .../neutron-network/templates/neutron-network.conf | 465 +++++++ .../roles/neutron-network/templates/neutron.conf | 466 +++++++ .../neutron-network/templates/neutron_init.sh | 4 + .../roles/neutron-network/templates/nova.conf | 69 + .../roles/nova-compute/handlers/main.yml | 3 + .../roles/nova-compute/tasks/main.yml | 21 + .../roles/nova-compute/templates/nova-compute.conf | 7 + .../roles/nova-compute/templates/nova.conf | 73 ++ .../roles/nova-controller/handlers/main.yml | 24 + .../roles/nova-controller/tasks/main.yml | 13 + .../roles/nova-controller/tasks/nova_config.yml | 16 + .../roles/nova-controller/tasks/nova_install.yml | 35 + .../roles/nova-controller/templates/dhcp_agent.ini | 90 ++ .../nova-controller/templates/dnsmasq-neutron.conf | 2 + .../nova-controller/templates/etc/xorp/config.boot | 25 + .../roles/nova-controller/templates/l3_agent.ini | 81 ++ .../nova-controller/templates/metadata_agent.ini | 46 + .../roles/nova-controller/templates/ml2_conf.ini | 108 ++ .../nova-controller/templates/neutron-network.conf | 465 +++++++ .../roles/nova-controller/templates/neutron.conf | 466 +++++++ .../nova-controller/templates/neutron_init.sh | 4 + .../roles/nova-controller/templates/nova.conf | 72 ++ .../openstack_juno/roles/repo/tasks/main.yml | 14 + .../roles/repo/templates/sources.list | 1 + .../ansible/openstack_juno/single-controller.yml | 38 + compass/deploy/ansible/openstack_juno/storage.yml | 8 + compass/deploy/conf/base.conf | 1 + compass/deploy/conf/cluster.conf | 20 + compass/deploy/conf/five.conf | 2 +- compass/deploy/deploy-vm.sh | 9 +- compass/deploy/remote_excute.exp | 23 + compass/deploy/status_callback.py | 174 +++ 134 files changed, 10876 insertions(+), 2 deletions(-) create mode 100644 compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml create mode 100644 compass/deploy/ansible/openstack_juno/allinone.yml create mode 100644 compass/deploy/ansible/openstack_juno/compute.yml create mode 100644 compass/deploy/ansible/openstack_juno/controller.yml create mode 100644 compass/deploy/ansible/openstack_juno/group_vars/all create mode 100644 compass/deploy/ansible/openstack_juno/multinodes.yml create mode 100644 compass/deploy/ansible/openstack_juno/network.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml create mode 100644 
compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list create mode 100644 compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/common/templates/hosts create mode 100644 compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py create mode 100644 compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/templates/my.cnf create mode 100644 compass/deploy/ansible/openstack_juno/roles/database/templates/wsrep.cnf create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg create mode 100644 compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf create mode 100644 
compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init create mode 100644 compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/monitor/files/root create mode 100644 compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie create mode 100644 compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf create mode 100644 
compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini create mode 100644 
compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh create mode 100644 compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf create mode 100644 compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml create mode 100644 compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list create mode 100644 compass/deploy/ansible/openstack_juno/single-controller.yml create mode 100644 compass/deploy/ansible/openstack_juno/storage.yml create mode 100644 compass/deploy/conf/cluster.conf create mode 100644 compass/deploy/remote_excute.exp create mode 100644 compass/deploy/status_callback.py diff --git a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml new file mode 100644 index 0000000..9c1d7e7 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml @@ -0,0 +1,42 @@ +--- +- hosts: all + remote_user: root + sudo: true + roles: + - repo + - common + +- hosts: ha + remote_user: root + sudo: True + roles: + - ha + +- hosts: controller + remote_user: root + sudo: True + roles: + - database + - mq + - keystone + - nova-controller + - neutron-controller + - cinder-controller + - glance + - neutron-common + - neutron-network + - dashboard + +- hosts: compute + remote_user: root + sudo: True + roles: + - nova-compute + - neutron-compute + - cinder-volume + +- hosts: all + remote_user: root + sudo: True + roles: + - monitor diff --git a/compass/deploy/ansible/openstack_juno/allinone.yml b/compass/deploy/ansible/openstack_juno/allinone.yml new file mode 100644 index 0000000..15220ca --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/allinone.yml @@ -0,0 +1,38 @@ +--- +- hosts: all + remote_user: root + sudo: true + roles: + - repo + +- hosts: controller + sudo: True + roles: + - common + - database + - mq + - keystone + - nova-controller + - neutron-controller + - dashboard + - cinder-controller + - glance + +- hosts: network + sudo: True + roles: + - common + - neutron-network + +- hosts: storage + sudo: True + roles: + - common + - cinder-volume + +- hosts: compute + sudo: True + roles: + - common + - nova-compute + - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/compute.yml b/compass/deploy/ansible/openstack_juno/compute.yml new file mode 100644 index 0000000..b2679c0 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/compute.yml @@ -0,0 +1,9 @@ +--- +- hosts: all + remote_user: vagrant + sudo: True + roles: + - repo + - common + - nova-compute + - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/controller.yml b/compass/deploy/ansible/openstack_juno/controller.yml new file mode 100644 index 0000000..7f4a10e --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/controller.yml @@ -0,0 +1,15 @@ +--- +- hosts: controller + remote_user: root + sudo: True + roles: + - repo + - common + - database + - mq + - keystone + - nova-controller + - neutron-controller + - dashboard + - cinder-controller + - glance diff --git 
a/compass/deploy/ansible/openstack_juno/group_vars/all b/compass/deploy/ansible/openstack_juno/group_vars/all new file mode 100644 index 0000000..5643fcd --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/group_vars/all @@ -0,0 +1,54 @@ +controller_host: 10.1.0.11 +network_host: 10.1.0.12 +compute_host: 10.1.0.13 +storage_host: 10.1.0.14 +odl_controller: 10.1.0.15 + +DEBUG: False +VERBOSE: False +NTP_SERVER_LOCAL: controller +DB_HOST: "{{ controller_host }}" +MQ_BROKER: rabbitmq + +OPENSTACK_REPO: cloudarchive-juno.list +ADMIN_TOKEN: admin +CEILOMETER_TOKEN: c095d479023a0fd58a54 +RABBIT_PASS: guest +KEYSTONE_DBPASS: keystone_db_secret +DEMO_PASS: demo_secret +ADMIN_PASS: admin_secret +GLANCE_DBPASS: glance_db_secret +GLANCE_PASS: glance_secret +NOVA_DBPASS: nova_db_secret +NOVA_PASS: nova_secret +DASH_DBPASS: dash_db_secret +CINDER_DBPASS: cinder_db_secret +CINDER_PASS: cinder_secret +NEUTRON_DBPASS: neutron_db_secret +NEUTRON_PASS: netron_secret +NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] +NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] +#NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] +NEUTRON_MECHANISM_DRIVERS: ['openvswitch'] +NEUTRON_TUNNEL_TYPES: ['vxlan'] +METADATA_SECRET: metadata_secret +INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21 +INTERFACE_NAME: eth2 + +EXTERNAL_NETWORK_CIDR: 203.0.113.0/24 +EXTERNAL_NETWORK_GATEWAY: 203.0.113.1 +FLOATING_IP_START: 203.0.113.101 +FLOATING_IP_END: 203.0.113.200 + +juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main +build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img +build_in_image_name: cirros-0.3.3-x86_64-disk.img + +physical_device: /dev/sdb + +internal_interface: ansible_eth1 +internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" + +odl_username: admin +odl_password: admin +odl_api_port: 8080 diff --git a/compass/deploy/ansible/openstack_juno/multinodes.yml b/compass/deploy/ansible/openstack_juno/multinodes.yml new file mode 100644 index 0000000..ffd29d5 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/multinodes.yml @@ -0,0 +1,75 @@ +--- +- hosts: all + remote_user: root + sudo: true + roles: + - repo + +- hosts: database + sudo: True + roles: + - common + - database + +- hosts: messaging + sudo: True + roles: + - common + - mq + +- hosts: identity + sudo: True + roles: + - common + - keystone + +- hosts: compute-controller + sudo: True + roles: + - common + - nova-controller + +- hosts: network-server + sudo: True + roles: + - common + - neutron-controller + +- hosts: storage-controller + sudo: True + roles: + - common + - cinder-controller + +- hosts: image + sudo: True + roles: + - common + - glance + +- hosts: dashboard + sudo: True + roles: + - common + - dashboard + +- hosts: network-worker + sudo: True + roles: + - common + - neutron-network + +- hosts: storage-volume + sudo: True + roles: + - common + - cinder-volume + +- hosts: compute-worker + sudo: True + roles: + - common + - nova-compute + - neutron-compute + + diff --git a/compass/deploy/ansible/openstack_juno/network.yml b/compass/deploy/ansible/openstack_juno/network.yml new file mode 100644 index 0000000..558f317 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/network.yml @@ -0,0 +1,8 @@ +--- +- hosts: all + remote_user: vagrant + sudo: True + roles: + - repo + - common + - neutron-network diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml 
b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml new file mode 100644 index 0000000..ef671dd --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart cinder-scheduler + service: name=cinder-scheduler state=restarted enabled=yes +- name: restart cinder-api + service: name=cinder-api state=restarted enabled=yes + diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml new file mode 100644 index 0000000..7796cf7 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_config.yml @@ -0,0 +1,20 @@ +--- +- name: sync cinder db + shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder + register: result + until: result.rc == 0 + retries: 5 + delay: 3 + notify: + - restart cinder-scheduler + - restart cinder-api + +- meta: flush_handlers + +- name: upload cinder keystone register script + template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744 + +- name: run cinder register script + shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? != 0 ]; then sleep 5; else break; fi; done + args: + creates: cinder_init_complete diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml new file mode 100644 index 0000000..03ad432 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/cinder_install.yml @@ -0,0 +1,20 @@ +--- +- name: install cinder packages + apt: name={{ item }} state=present force=yes + with_items: + - cinder-api + - cinder-scheduler + - python-cinderclient + +- name: generate cinder service list + shell: echo {{ item }} >> /opt/service + with_items: + - cinder-api + - cinder-scheduler + +- name: upload cinder conf + template: src=cinder.conf dest=/etc/cinder/cinder.conf + notify: + - restart cinder-scheduler + - restart cinder-api + diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml new file mode 100644 index 0000000..1dbe91f --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- include: cinder_install.yml + tags: + - install + - cinder-install + - cinder + +- include: cinder_config.yml + when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' + tags: + - config + - cinder-config + - cinder diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini new file mode 100644 index 0000000..b568a17 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/api-paste.ini @@ -0,0 +1,71 @@ +############# +# OpenStack # +############# + +[composite:osapi_volume] +use = call:cinder.api:root_app_factory +/: apiversions +/v1: openstack_volume_api_v1 +/v2: openstack_volume_api_v2 + +[composite:openstack_volume_api_v1] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv1 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 +keystone_nolimit = request_id faultwrap 
sizelimit osprofiler authtoken keystonecontext apiv1 + +[composite:openstack_volume_api_v2] +use = call:cinder.api.middleware.auth:pipeline_factory +noauth = request_id faultwrap sizelimit osprofiler noauth apiv2 +keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 +keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 + +[filter:request_id] +paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory + +[filter:faultwrap] +paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes + +[filter:noauth] +paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory + +[filter:sizelimit] +paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory + +[app:apiv1] +paste.app_factory = cinder.api.v1.router:APIRouter.factory + +[app:apiv2] +paste.app_factory = cinder.api.v2.router:APIRouter.factory + +[pipeline:apiversions] +pipeline = faultwrap osvolumeversionapp + +[app:osvolumeversionapp] +paste.app_factory = cinder.api.versions:Versions.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +# auth_host = 127.0.0.1 +# auth_port = 35357 +# auth_protocol = http +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = cinder +admin_password = {{ CINDER_PASS }} + +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf new file mode 100644 index 0000000..e34fd2f --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder.conf @@ -0,0 +1,63 @@ +[DEFAULT] +rootwrap_config = /etc/cinder/rootwrap.conf +api_paste_confg = /etc/cinder/api-paste.ini +iscsi_helper = tgtadm +volume_name_template = volume-%s +volume_group = cinder-volumes +verbose = {{ VERBOSE }} +debug = {{ DEBUG }} +auth_strategy = keystone +state_path = /var/lib/cinder +lock_path = /var/lock/cinder +notification_driver=cinder.openstack.common.notifier.rpc_notifier +volumes_dir = /var/lib/cinder/volumes + +log_file=/var/log/cinder/cinder.log + +control_exchange = cinder +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_port = 5672 +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +my_ip = {{ storage_controller_host }} + +glance_host = {{ HA_VIP }} +glance_port = 9292 +api_rate_limit = False +storage_availability_zone = nova + +quota_volumes = 10 +quota_gigabytes=1000 +quota_driver=cinder.quota.DbQuotaDriver + +osapi_volume_listen = {{ storage_controller_host }} +osapi_volume_listen_port = 8776 + +db_backend = sqlalchemy +volume_name_template = volume-%s +snapshot_name_template = snapshot-%s + +max_gigabytes=10000 +volume_group=cinder-volumes + +volume_clear=zero +volume_clear_size=10 + +iscsi_ip_address={{ storage_controller_host }} +iscsi_port=3260 +iscsi_helper=tgtadm + +volumes_dir=/var/lib/cinder/volumes + +volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver + +[keystone_authtoken] +auth_uri = http://{{ 
HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = cinder +admin_password = {{ CINDER_PASS }} + +[database] +connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh new file mode 100644 index 0000000..0ec61b6 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-controller/templates/cinder_init.sh @@ -0,0 +1,6 @@ +keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com +keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin + +keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage" +keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s + diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml new file mode 100644 index 0000000..e872652 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/files/loop.yml @@ -0,0 +1 @@ +physical_device: /dev/loop0 diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml new file mode 100644 index 0000000..d8e8852 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: restart cinder-volume + service: name=cinder-volume state=restarted enabled=yes + diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml new file mode 100644 index 0000000..8c0e626 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: install cinder-volume and lvm2 packages + apt: name={{ item }} state=present force=yes + with_items: + - cinder-volume + - lvm2 + +- name: generate cinder volume service list + shell: echo {{ item }} >> /opt/service + with_items: + - cinder-volume + +- name: check if physical device exists + stat: path={{ physical_device }} + register: st + +- name: repace physical_device if st is false + local_action: copy src=loop.yml dest=/tmp/loop.yml + when: st.stat.exists == False + +- name: load loop.yml + include_vars: /tmp/loop.yml + when: st.stat.exists == False + +- name: check if cinder-volumes is mounted + shell: ls /mnt + register: cindervolumes + +- name: get available partition size + shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }' + register: partition_size + +- name: if not mounted, mount it + shell: dd if=/dev/zero of=/mnt/cinder-volumes + bs=1 count=0 seek={{ partition_size.stdout }} + when: cindervolumes.stdout != 'cinder-volumes' + +- name: get first lo device + shell: ls 
/dev/loop* | egrep 'loop[0-9]+'|sed -n 1p + register: first_lo + when: cindervolumes.stdout != 'cinder-volumes' + +- name: do a losetup on /mnt/cinder-volumes + shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes + when: cindervolumes.stdout != 'cinder-volumes' + +- name: create physical and group volumes + lvg: vg=cinder-volumes pvs={{ physical_device }} + vg_options=--force + +- name: upload cinder-volume configuration + template: src=cinder.conf dest=/etc/cinder/cinder.conf + backup=yes + notify: + - restart cinder-volume diff --git a/compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf new file mode 100644 index 0000000..aa3b8cc --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/cinder-volume/templates/cinder.conf @@ -0,0 +1,62 @@ +[DEFAULT] +rootwrap_config = /etc/cinder/rootwrap.conf +api_paste_confg = /etc/cinder/api-paste.ini +iscsi_helper = tgtadm +volume_name_template = volume-%s +volume_group = cinder-volumes +verbose = True +auth_strategy = keystone +state_path = /var/lib/cinder +lock_path = /var/lock/cinder +notification_driver=cinder.openstack.common.notifier.rpc_notifier +volumes_dir = /var/lib/cinder/volumes + +log_file=/var/log/cinder/cinder.log + +control_exchange = cinder +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_port = 5672 +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +my_ip = {{ storage_controller_host }} + +glance_host = {{ HA_VIP }} +glance_port = 9292 +api_rate_limit = False +storage_availability_zone = nova + +quota_volumes = 10 +quota_gigabytes=1000 +quota_driver=cinder.quota.DbQuotaDriver + +osapi_volume_listen = {{ storage_controller_host }} +osapi_volume_listen_port = 8776 + +db_backend = sqlalchemy +volume_name_template = volume-%s +snapshot_name_template = snapshot-%s + +max_gigabytes=10000 +volume_group=cinder-volumes + +volume_clear=zero +volume_clear_size=10 + +iscsi_ip_address={{ storage_controller_host }} +iscsi_port=3260 +iscsi_helper=tgtadm + +volumes_dir=/var/lib/cinder/volumes + +volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = cinder +admin_password = {{ CINDER_PASS }} + +[database] +connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder diff --git a/compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list new file mode 100644 index 0000000..920f3d2 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/common/files/sources.list.d/cloudarchive-juno.list @@ -0,0 +1 @@ +deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main diff --git a/compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml new file mode 100644 index 0000000..ce595f5 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/common/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: install ubuntu-cloud-keyring(ubuntu) + apt: name={{ item }} state=latest + with_items: + - ubuntu-cloud-keyring + +- name: update hosts files to all hosts + template: src=hosts + dest=/etc/hosts + backup=yes + +- name: install common packages + apt: name={{ item }} state=latest + with_items: + - python-pip + - python-dev + - 
python-mysqldb + - ntp + +- name: restart ntp + command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc" + ignore_errors: True + +- name: update ntp conf + template: src=ntp.conf dest=/etc/ntp.conf backup=yes + +- name: restart ntp + service: name=ntp state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/common/templates/hosts b/compass/deploy/ansible/openstack_juno/roles/common/templates/hosts new file mode 100644 index 0000000..9d27c0a --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/common/templates/hosts @@ -0,0 +1,22 @@ +# compute-controller +10.145.89.136 host-136 +# database +10.145.89.136 host-136 +# messaging +10.145.89.136 host-136 +# storage-controller +10.145.89.138 host-138 +# image +10.145.89.138 host-138 +# identity +10.145.89.136 host-136 +# network-server +10.145.89.138 host-138 +# dashboard +10.145.89.136 host-136 +# storage-volume +10.145.89.139 host-139 +# network-worker +10.145.89.139 host-139 +# compute-worker +10.145.89.137 host-137 diff --git a/compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf b/compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf new file mode 100644 index 0000000..c613809 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/common/templates/ntp.conf @@ -0,0 +1,56 @@ +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile /var/lib/ntp/ntp.drift + + +# Enable this if you want statistics to be logged. +#statsdir /var/log/ntpstats/ + +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + +# Specify one or more NTP servers. + +# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board +# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for +# more information. +server {{ NTP_SERVER_LOCAL }} +server 0.ubuntu.pool.ntp.org +server 1.ubuntu.pool.ntp.org +server 2.ubuntu.pool.ntp.org +server 3.ubuntu.pool.ntp.org + +# Use Ubuntu's ntp server as a fallback. +server ntp.ubuntu.com + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +restrict -4 default kod notrap nomodify nopeer noquery +restrict -6 default kod notrap nomodify nopeer noquery + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 + +# Clients from this (example!) subnet have unlimited access, but only if +# cryptographically authenticated. +#restrict 192.168.123.0 mask 255.255.255.0 notrust + + +# If you want to provide time to your local subnet, change the next line. +# (Again, the address is an example only.) +#broadcast 192.168.123.255 + +# If you want to listen to time broadcasts on your local subnet, de-comment the +# next lines. Please do this only if you trust everybody on the network! 
+#disable auth +#broadcastclient diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml new file mode 100644 index 0000000..465b996 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/dashboard/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: install dashboard packages + apt: name={{ item }} state=present force=yes + with_items: + - apache2 + - memcached + - libapache2-mod-wsgi + - openstack-dashboard + +- name: remove ubuntu theme + apt: name=openstack-dashboard-ubuntu-theme + state=absent + +## horizon configuration is already enabled in apache2/conf-enabled +## by openstack-dashboard package deploy script. +#- name: update dashboard conf +# template: src=openstack-dashboard.conf +# dest=/etc/apache2/sites-available/openstack-dashboard.conf +# backup=yes + +- name: update horizon settings + template: src=local_settings.py + dest=/etc/openstack-dashboard/local_settings.py + backup=yes + +- name: restart apache2 + service: name=apache2 state=restarted enabled=yes + +- name: restart memcached + service: name=memcached state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py new file mode 100644 index 0000000..87e06e3 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/local_settings.py @@ -0,0 +1,511 @@ +import os + +from django.utils.translation import ugettext_lazy as _ + +from openstack_dashboard import exceptions + +DEBUG = True +TEMPLATE_DEBUG = DEBUG + +# Required for Django 1.5. +# If horizon is running in production (DEBUG is False), set this +# with the list of host/domain names that the application can serve. +# For more information see: +# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts +#ALLOWED_HOSTS = ['horizon.example.com', ] + +# Set SSL proxy settings: +# For Django 1.4+ pass this header from the proxy after terminating the SSL, +# and don't forget to strip it from the client's request. +# For more information see: +# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') + +# If Horizon is being served through SSL, then uncomment the following two +# settings to better secure the cookies from security exploits +#CSRF_COOKIE_SECURE = True +#SESSION_COOKIE_SECURE = True + +# Overrides for OpenStack API versions. Use this setting to force the +# OpenStack dashboard to use a specific API version for a given service API. +# NOTE: The version should be formatted as it appears in the URL for the +# service API. For example, The identity service APIs have inconsistent +# use of the decimal point, so valid options would be "2.0" or "3". +# OPENSTACK_API_VERSIONS = { +# "identity": 3, +# "volume": 2 +# } + +# Set this to True if running on multi-domain model. When this is enabled, it +# will require user to enter the Domain name in addition to username for login. +# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False + +# Overrides the default domain used when running on single-domain model +# with Keystone V3. All entities will be created in the default domain. +# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' + +# Set Console type: +# valid options would be "AUTO", "VNC", "SPICE" or "RDP" +# CONSOLE_TYPE = "AUTO" + +# Default OpenStack Dashboard configuration. 
+HORIZON_CONFIG = { + 'dashboards': ('project', 'admin', 'settings',), + 'default_dashboard': 'project', + 'user_home': 'openstack_dashboard.views.get_user_home', + 'ajax_queue_limit': 10, + 'auto_fade_alerts': { + 'delay': 3000, + 'fade_duration': 1500, + 'types': ['alert-success', 'alert-info'] + }, + 'help_url': "http://docs.openstack.org", + 'exceptions': {'recoverable': exceptions.RECOVERABLE, + 'not_found': exceptions.NOT_FOUND, + 'unauthorized': exceptions.UNAUTHORIZED}, +} + +# Specify a regular expression to validate user passwords. +# HORIZON_CONFIG["password_validator"] = { +# "regex": '.*', +# "help_text": _("Your password does not meet the requirements.") +# } + +# Disable simplified floating IP address management for deployments with +# multiple floating IP pools or complex network requirements. +# HORIZON_CONFIG["simple_ip_management"] = False + +# Turn off browser autocompletion for the login form if so desired. +# HORIZON_CONFIG["password_autocomplete"] = "off" + +LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) + +# Set custom secret key: +# You can either set it to a specific value or you can let horizion generate a +# default secret key that is unique on this machine, e.i. regardless of the +# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there +# may be situations where you would want to set this explicitly, e.g. when +# multiple dashboard instances are distributed on different machines (usually +# behind a load-balancer). Either you have to make sure that a session gets all +# requests routed to the same dashboard instance or you set the same SECRET_KEY +# for all of them. +from horizon.utils import secret_key +SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA' +# We recommend you use memcached for development; otherwise after every reload +# of the django development server, you will have to login again. To use +# memcached set CACHES to something like +CACHES = { + 'default': { + 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION' : '127.0.0.1:11211', + } +} + +#CACHES = { +# 'default': { +# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache' +# } +#} + +# Enable the Ubuntu theme if it is present. +try: + from ubuntu_theme import * +except ImportError: + pass + +# Default Ubuntu apache configuration uses /horizon as the application root. +# Configure auth redirects here accordingly. +LOGIN_URL='/horizon/auth/login/' +LOGOUT_URL='/horizon/auth/logout/' +LOGIN_REDIRECT_URL='/horizon' + +# The Ubuntu package includes pre-compressed JS and compiled CSS to allow +# offline compression by default. To enable online compression, install +# the node-less package and enable the following option. +COMPRESS_OFFLINE = True + +# By default, validation of the HTTP Host header is disabled. Production +# installations should have this set accordingly. For more information +# see https://docs.djangoproject.com/en/dev/ref/settings/. +ALLOWED_HOSTS = ['{{ dashboard_host }}'] + +# Send email to the console by default +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' +# Or send them to /dev/null +#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' + +# Configure these for your outgoing email host +# EMAIL_HOST = 'smtp.my-company.com' +# EMAIL_PORT = 25 +# EMAIL_HOST_USER = 'djangomail' +# EMAIL_HOST_PASSWORD = 'top-secret!' + +# For multiple regions uncomment this configuration, and add (endpoint, title). 
+# AVAILABLE_REGIONS = [ +# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), +# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), +# ] + +OPENSTACK_HOST = "{{ HA_VIP }}" +OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST +OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" + +# Disable SSL certificate checks (useful for self-signed certificates): +# OPENSTACK_SSL_NO_VERIFY = True + +# The CA certificate to use to verify SSL connections +# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' + +# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the +# capabilities of the auth backend for Keystone. +# If Keystone has been configured to use LDAP as the auth backend then set +# can_edit_user to False and name to 'ldap'. +# +# TODO(tres): Remove these once Keystone has an API to identify auth backend. +OPENSTACK_KEYSTONE_BACKEND = { + 'name': 'native', + 'can_edit_user': True, + 'can_edit_group': True, + 'can_edit_project': True, + 'can_edit_domain': True, + 'can_edit_role': True +} + +#Setting this to True, will add a new "Retrieve Password" action on instance, +#allowing Admin session password retrieval/decryption. +#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False + +# The Xen Hypervisor has the ability to set the mount point for volumes +# attached to instances (other Hypervisors currently do not). Setting +# can_set_mount_point to True will add the option to set the mount point +# from the UI. +OPENSTACK_HYPERVISOR_FEATURES = { + 'can_set_mount_point': False, + 'can_set_password': False, +} + +# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional +# services provided by neutron. Options currently available are load +# balancer service, security groups, quotas, VPN service. +OPENSTACK_NEUTRON_NETWORK = { + 'enable_lb': False, + 'enable_firewall': False, + 'enable_quotas': True, + 'enable_vpn': False, + # The profile_support option is used to detect if an external router can be + # configured via the dashboard. When using specific plugins the + # profile_support can be turned on if needed. + 'profile_support': None, + #'profile_support': 'cisco', +} + +# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features +# in the OpenStack Dashboard related to the Image service, such as the list +# of supported image formats. +# OPENSTACK_IMAGE_BACKEND = { +# 'image_formats': [ +# ('', ''), +# ('aki', _('AKI - Amazon Kernel Image')), +# ('ami', _('AMI - Amazon Machine Image')), +# ('ari', _('ARI - Amazon Ramdisk Image')), +# ('iso', _('ISO - Optical Disk Image')), +# ('qcow2', _('QCOW2 - QEMU Emulator')), +# ('raw', _('Raw')), +# ('vdi', _('VDI')), +# ('vhd', _('VHD')), +# ('vmdk', _('VMDK')) +# ] +# } + +# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for +# image custom property attributes that appear on image detail pages. +IMAGE_CUSTOM_PROPERTY_TITLES = { + "architecture": _("Architecture"), + "kernel_id": _("Kernel ID"), + "ramdisk_id": _("Ramdisk ID"), + "image_state": _("Euca2ools state"), + "project_id": _("Project ID"), + "image_type": _("Image Type") +} + +# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints +# in the Keystone service catalog. Use this setting when Horizon is running +# external to the OpenStack environment. The default is 'publicURL'. +#OPENSTACK_ENDPOINT_TYPE = "publicURL" + +# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the +# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints +# in the Keystone service catalog. 
Use this setting when Horizon is running +# external to the OpenStack environment. The default is None. This +# value should differ from OPENSTACK_ENDPOINT_TYPE if used. +#SECONDARY_ENDPOINT_TYPE = "publicURL" + +# The number of objects (Swift containers/objects or images) to display +# on a single page before providing a paging element (a "more" link) +# to paginate results. +API_RESULT_LIMIT = 1000 +API_RESULT_PAGE_SIZE = 20 + +# The timezone of the server. This should correspond with the timezone +# of your entire OpenStack installation, and hopefully be in UTC. +TIME_ZONE = "UTC" + +# When launching an instance, the menu of available flavors is +# sorted by RAM usage, ascending. If you would like a different sort order, +# you can provide another flavor attribute as sorting key. Alternatively, you +# can provide a custom callback method to use for sorting. You can also provide +# a flag for reverse sort. For more info, see +# http://docs.python.org/2/library/functions.html#sorted +# CREATE_INSTANCE_FLAVOR_SORT = { +# 'key': 'name', +# # or +# 'key': my_awesome_callback_method, +# 'reverse': False, +# } + +# The Horizon Policy Enforcement engine uses these values to load per service +# policy rule files. The content of these files should match the files the +# OpenStack services are using to determine role based access control in the +# target installation. + +# Path to directory containing policy.json files +#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") +# Map of local copy of service policy files +#POLICY_FILES = { +# 'identity': 'keystone_policy.json', +# 'compute': 'nova_policy.json', +# 'volume': 'cinder_policy.json', +# 'image': 'glance_policy.json', +#} + +# Trove user and database extension support. By default support for +# creating users and databases on database instances is turned on. +# To disable these extensions set the permission here to something +# unusable such as ["!"]. +# TROVE_ADD_USER_PERMS = [] +# TROVE_ADD_DATABASE_PERMS = [] + +LOGGING = { + 'version': 1, + # When set to True this will disable all logging except + # for loggers specified in this configuration dictionary. Note that + # if nothing is specified here and disable_existing_loggers is True, + # django.db.backends will still log unless it is disabled explicitly. + 'disable_existing_loggers': False, + 'handlers': { + 'null': { + 'level': 'DEBUG', + 'class': 'django.utils.log.NullHandler', + }, + 'console': { + # Set the level to "DEBUG" for verbose output logging. + 'level': 'INFO', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + # Logging from django.db.backends is VERY verbose, send to null + # by default. 
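+        # If you do need to see the SQL that Horizon issues, a debugging
+        # sketch is to point this logger at 'console' instead, e.g.:
+        #   'django.db.backends': {'handlers': ['console'], 'level': 'DEBUG'},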
+ 'django.db.backends': { + 'handlers': ['null'], + 'propagate': False, + }, + 'requests': { + 'handlers': ['null'], + 'propagate': False, + }, + 'horizon': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_dashboard': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'novaclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'cinderclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'keystoneclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'glanceclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'neutronclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'heatclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'ceilometerclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'troveclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'swiftclient': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'openstack_auth': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'nose.plugins.manager': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'django': { + 'handlers': ['console'], + 'level': 'DEBUG', + 'propagate': False, + }, + 'iso8601': { + 'handlers': ['null'], + 'propagate': False, + }, + } +} + +# 'direction' should not be specified for all_tcp/udp/icmp. +# It is specified in the form. +SECURITY_GROUP_RULES = { + 'all_tcp': { + 'name': 'ALL TCP', + 'ip_protocol': 'tcp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_udp': { + 'name': 'ALL UDP', + 'ip_protocol': 'udp', + 'from_port': '1', + 'to_port': '65535', + }, + 'all_icmp': { + 'name': 'ALL ICMP', + 'ip_protocol': 'icmp', + 'from_port': '-1', + 'to_port': '-1', + }, + 'ssh': { + 'name': 'SSH', + 'ip_protocol': 'tcp', + 'from_port': '22', + 'to_port': '22', + }, + 'smtp': { + 'name': 'SMTP', + 'ip_protocol': 'tcp', + 'from_port': '25', + 'to_port': '25', + }, + 'dns': { + 'name': 'DNS', + 'ip_protocol': 'tcp', + 'from_port': '53', + 'to_port': '53', + }, + 'http': { + 'name': 'HTTP', + 'ip_protocol': 'tcp', + 'from_port': '80', + 'to_port': '80', + }, + 'pop3': { + 'name': 'POP3', + 'ip_protocol': 'tcp', + 'from_port': '110', + 'to_port': '110', + }, + 'imap': { + 'name': 'IMAP', + 'ip_protocol': 'tcp', + 'from_port': '143', + 'to_port': '143', + }, + 'ldap': { + 'name': 'LDAP', + 'ip_protocol': 'tcp', + 'from_port': '389', + 'to_port': '389', + }, + 'https': { + 'name': 'HTTPS', + 'ip_protocol': 'tcp', + 'from_port': '443', + 'to_port': '443', + }, + 'smtps': { + 'name': 'SMTPS', + 'ip_protocol': 'tcp', + 'from_port': '465', + 'to_port': '465', + }, + 'imaps': { + 'name': 'IMAPS', + 'ip_protocol': 'tcp', + 'from_port': '993', + 'to_port': '993', + }, + 'pop3s': { + 'name': 'POP3S', + 'ip_protocol': 'tcp', + 'from_port': '995', + 'to_port': '995', + }, + 'ms_sql': { + 'name': 'MS SQL', + 'ip_protocol': 'tcp', + 'from_port': '1433', + 'to_port': '1433', + }, + 'mysql': { + 'name': 'MYSQL', + 'ip_protocol': 'tcp', + 'from_port': '3306', + 'to_port': '3306', + }, + 'rdp': { + 'name': 'RDP', + 'ip_protocol': 'tcp', + 'from_port': '3389', + 'to_port': '3389', + }, +} + +FLAVOR_EXTRA_KEYS = { + 'flavor_keys': [ + ('quota:read_bytes_sec', _('Quota: Read bytes')), + ('quota:write_bytes_sec', _('Quota: Write 
bytes')),
+        ('quota:cpu_quota', _('Quota: CPU')),
+        ('quota:cpu_period', _('Quota: CPU period')),
+        ('quota:inbound_average', _('Quota: Inbound average')),
+        ('quota:outbound_average', _('Quota: Outbound average')),
+    ]
+}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf
new file mode 100644
index 0000000..a5a791a
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/dashboard/templates/openstack-dashboard.conf
@@ -0,0 +1,14 @@
+<VirtualHost *:80>
+
+WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
+Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
+
+<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
+Order allow,deny
+Allow from all
+</Directory>
+
+</VirtualHost>
+
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf b/compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf
new file mode 100644
index 0000000..d61f947
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/database/files/my.cnf
@@ -0,0 +1,131 @@
+#
+# The MySQL database server configuration file.
+#
+# You can copy this to one of:
+# - "/etc/mysql/my.cnf" to set global options,
+# - "~/.my.cnf" to set user-specific options.
+#
+# One can use all long options that the program supports.
+# Run program with --help to get a list of available options and with
+# --print-defaults to see which it would actually understand and use.
+#
+# For explanations see
+# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
+
+# This will be passed to all mysql clients
+# It has been reported that passwords should be enclosed with ticks/quotes
+# especially if they contain "#" chars...
+# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
+[client]
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
+
+# Here are entries for some specific programs
+# The following values assume you have at least 32M ram
+
+# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
+[mysqld_safe]
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+[mysqld]
+#
+# * Basic Settings
+#
+user = mysql
+pid-file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
+port = 3306
+basedir = /usr
+datadir = /var/lib/mysql
+tmpdir = /tmp
+lc-messages-dir = /usr/share/mysql
+skip-external-locking
+#
+# Instead of skip-networking the default is now to listen only on
+# localhost, which is more compatible and is not less secure.
+bind-address = 0.0.0.0
+#
+# * Fine Tuning
+#
+key_buffer = 16M
+max_allowed_packet = 16M
+thread_stack = 192K
+thread_cache_size = 8
+# This replaces the startup script and checks MyISAM tables if needed
+# the first time they are touched
+myisam-recover = BACKUP
+#max_connections = 100
+#table_cache = 64
+#thread_concurrency = 10
+#
+# * Query Cache Configuration
+#
+query_cache_limit = 1M
+query_cache_size = 16M
+#
+# * Logging and Replication
+#
+# Both locations get rotated by the cronjob.
+# Be aware that this log type is a performance killer.
+# As of 5.1 you can enable the log at runtime!
+#general_log_file = /var/log/mysql/mysql.log
+#general_log = 1
+#
+# Error log - should be very few entries.
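+# (With the Galera setup used elsewhere in this deployment, wsrep and state
+# transfer problems are also reported in this error log.)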
+# +log_error = /var/log/mysql/error.log +# +# Here you can see queries with especially long duration +#log_slow_queries = /var/log/mysql/mysql-slow.log +#long_query_time = 2 +#log-queries-not-using-indexes +# +# The following can be used as easy to replay backup logs or for replication. +# note: if you are setting up a replication slave, see README.Debian about +# other settings you may need to change. +#server-id = 1 +#log_bin = /var/log/mysql/mysql-bin.log +expire_logs_days = 10 +max_binlog_size = 100M +#binlog_do_db = include_database_name +#binlog_ignore_db = include_database_name +# +# * InnoDB +# +# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. +# Read the manual for more InnoDB related options. There are many! +# +# * Security Features +# +# Read the manual, too, if you want chroot! +# chroot = /var/lib/mysql/ +# +# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". +# +# ssl-ca=/etc/mysql/cacert.pem +# ssl-cert=/etc/mysql/server-cert.pem +# ssl-key=/etc/mysql/server-key.pem +default-storage-engine = innodb +innodb_file_per_table +collation-server = utf8_general_ci +init-connect = 'SET NAMES utf8' +character-set-server = utf8 + +[mysqldump] +quick +quote-names +max_allowed_packet = 16M + +[mysql] +#no-auto-rehash # faster start of mysql but no tab completition + +[isamchk] +key_buffer = 16M + +# +# * IMPORTANT: Additional settings that can override those from this file! +# The files must end with '.cnf', otherwise they'll be ignored. +# +!includedir /etc/mysql/conf.d/ + diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml new file mode 100644 index 0000000..e66f0cd --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/database/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: copy data.sh + template: src=data.j2 dest=/opt/data.sh mode=777 + tags: + - mysql_user + +- include: mysql.yml + when: HA_CLUSTER is not defined + +- include: mariadb.yml + when: HA_CLUSTER is defined + diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml new file mode 100644 index 0000000..f87ea2f --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mariadb.yml @@ -0,0 +1,61 @@ +--- +- name: install python-mysqldb + apt: name={{ item }} state=present force=yes + with_items: + - libaio1 + - libssl0.9.8 + #- mariadb-client-5.5 + - mysql-client-5.5 + - python-mysqldb + +- name: download mariadb and galera deb package + get_url: url={{ item.url }} dest=/opt/{{ item.filename }} + register: result + until: result|success + retries: 5 + delay: 3 + with_items: + - { url: "{{ MARIADB_URL }}", filename: "{{ MARIADB }}" } + - { url: "{{ GALERA_URL }}", filename: "{{ GALERA }}" } + +- name: install mariadb and galera packages + command: dpkg -i /opt/{{ item }} + with_items: + - "{{ MARIADB }}" + - "{{ GALERA }}" + +- name: create mysql log directy + file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755 + +- name: update mariadb my.cnf + template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes + +- name: update galera wsrep.cnf + template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes + +- name: update wsrep_sst_rsync uid + lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes + +- name: update wsrep_sst_rsync gid + lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent 
regexp="\s*gid = \$MYGID$" backup=yes + +- name: manually restart mysql server + service: name=mysql state=restarted enabled=yes + register: result + until: result|success + retries: 5 + delay: 5 + tags: + - mysql_restart + +- name: generate mysql service list + shell: echo {{ item }} >> /opt/service + with_items: + - mysql + + +- name: create database/user + shell: /opt/data.sh + when: HA_CLUSTER[inventory_hostname] == '' + tags: + - mysql_user diff --git a/compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml new file mode 100644 index 0000000..327b656 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/database/tasks/mysql.yml @@ -0,0 +1,22 @@ +--- +- name: install mysql client and server packages + apt: name={{ item }} state=present + with_items: + - python-mysqldb + - mysql-server + +- name: create mysql log directy + file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755 + +- name: update mysql my.cnf + copy: src=my.cnf + dest=/etc/mysql/my.cnf + backup=yes + +- name: manually restart mysql server + shell: service mysql restart + +- name: create database/user + shell: /opt/data.sh + tags: + - mysql_user diff --git a/compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 b/compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 new file mode 100644 index 0000000..c894b32 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/database/templates/data.j2 @@ -0,0 +1,39 @@ +#!/bin/sh +mysql -uroot -Dmysql <[:port] of the node. +# The values supplied will be used as defaults for state transfer receiving, +# listening ports and so on. Default: address of the first network interface. +wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }} + +# Address for incoming client connections. Autodetect by default. +#wsrep_node_incoming_address= + +# How many threads will process writesets from other nodes +wsrep_slave_threads=1 + +# DBUG options for wsrep provider +#wsrep_dbug_option + +# Generate fake primary keys for non-PK tables (required for multi-master +# and parallel applying operation) +wsrep_certify_nonPK=1 + +# Maximum number of rows in write set +wsrep_max_ws_rows=131072 + +# Maximum size of write set +wsrep_max_ws_size=1073741824 + +# to enable debug level logging, set this to 1 +wsrep_debug=1 + +# convert locking sessions into transactions +wsrep_convert_LOCK_to_trx=0 + +# how many times to retry deadlocked autocommits +wsrep_retry_autocommit=1 + +# change auto_increment_increment and auto_increment_offset automatically +wsrep_auto_increment_control=1 + +# retry autoinc insert, which failed for duplicate key error +wsrep_drupal_282555_workaround=0 + +# enable "strictly synchronous" semantics for read operations +wsrep_causal_reads=0 + +# Command to call when node status or cluster membership changes. +# Will be passed all or some of the following options: +# --status - new status of this node +# --uuid - UUID of the cluster +# --primary - whether the component is primary or not ("yes"/"no") +# --members - comma-separated list of members +# --index - index of this node in the list +wsrep_notify_cmd= + +## +## WSREP State Transfer options +## + +# State Snapshot Transfer method +wsrep_sst_method=rsync + +# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!! +# (SST method dependent. 
Defaults to the first IP of the first interface) +#wsrep_sst_receive_address= + +# SST authentication string. This will be used to send SST to joining nodes. +# Depends on SST method. For mysqldump method it is root: +wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }} + +# Desired SST donor name. +#wsrep_sst_donor= + +# Protocol version to use +# wsrep_protocol_version= diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml new file mode 100644 index 0000000..d8eaa44 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/handlers/main.yml @@ -0,0 +1,6 @@ +--- +- name: restart glance-api + service: name=glance-api state=restarted enabled=yes + +- name: restart glance-registry + service: name=glance-registry state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml new file mode 100644 index 0000000..28392a3 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_config.yml @@ -0,0 +1,29 @@ +--- +- name: init glance db version + shell: glance-manage db_version_control 0 + +- name: sync glance db + shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance + register: result + until: result.rc == 0 + retries: 5 + delay: 3 + notify: + - restart glance-registry + - restart glance-api + +- meta: flush_handlers + +- name: place image upload script + template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744 + +- name: download cirros image file + get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }} + +- name: wait for 9292 port to become available + wait_for: host={{ image_host }} port=9292 delay=5 + +- name: run image upload + shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? 
!= 0 ] ;then sleep 5; else break;fi;done + args: + creates: image_upload_completed diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml new file mode 100644 index 0000000..505b3b0 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/glance_install.yml @@ -0,0 +1,26 @@ +--- +- name: install glance packages + apt: name={{ item }} state=latest force=yes + with_items: + - glance + - python-glanceclient + +- name: generate glance service list + shell: echo {{ item }} >> /opt/service + with_items: + - glance-registry + - glance-api + +- name: update glance conf + template: src={{ item }} dest=/etc/glance/{{ item }} + backup=yes + with_items: + - glance-api.conf + - glance-registry.conf + notify: + - restart glance-registry + - restart glance-api + +- name: remove default sqlite db + shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed + diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml new file mode 100644 index 0000000..296f0dc --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- include: glance_install.yml + tags: + - install + - glance_install + - glance + +- include: nfs.yml + tags: + - nfs + +- include: glance_config.yml + when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' + tags: + - config + - glance_config + - glance + diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml new file mode 100644 index 0000000..c03ab4d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/tasks/nfs.yml @@ -0,0 +1,41 @@ +--- +- name: get nfs server + local_action: shell /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:" + register: ip_info + run_once: True + +- name: install nfs + local_action: yum name=nfs-utils state=present + run_once: True + +- name: create image directory + local_action: file path=/opt/images state=directory mode=0777 + run_once: True + +- name: update nfs config + local_action: lineinfile dest=/etc/exports state=present + regexp="/opt/images *(rw,insecure,sync,all_squash)" + line="/opt/images *(rw,insecure,sync,all_squash)" + run_once: True + +- name: restart nfs service + local_action: service name=nfs state=restarted enabled=yes + run_once: True + +- name: install nfs comm + apt: name=nfs-common state=present + +- name: get mount info + command: mount + register: mount_info + +- name: mount image directory + shell: | + mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images + sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab + echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab + when: mount_info.stdout.find('images') == -1 + with_items: + ip_info.stdout_lines + retries: 5 + delay: 3 diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf new file mode 100644 index 0000000..763539e --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-api.conf @@ -0,0 +1,677 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level 
output) +#debug = False + +# Which backend scheme should Glance use by default is not specified +# in a request to add a new image to Glance? Known schemes are determined +# by the known_stores option below. +# Default: 'file' +# "default_store" option has been moved to [glance_store] section in +# Juno release + +# List of which store classes and store class locations are +# currently known to glance at startup. +# Existing but disabled stores: +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.gridfs.Store, +# glance.store.vmware_datastore.Store, +#known_stores = glance.store.filesystem.Store, +# glance.store.http.Store + + +# Maximum image size (in bytes) that may be uploaded through the +# Glance API server. Defaults to 1 TB. +# WARNING: this value should only be increased after careful consideration +# and must be set to a value under 8 EB (9223372036854775808). +#image_size_cap = 1099511627776 + +# Address to bind the API server +bind_host = {{ image_host }} + +# Port the bind the API server to +bind_port = 9292 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/glance/api.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +# data_api = glance.db.sqlalchemy.api + +# Number of Glance API worker processes to start. +# On machines with more than one CPU increasing this value +# may improve performance (especially if using SSL with +# compression turned on). It is typically recommended to set +# this value to the number of CPUs present on your machine. +workers = 1 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens +# (typically those generated by the Keystone v3 API with big service +# catalogs) +# max_header_line = 16384 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access = False + +# Allow access to version 1 of glance api +#enable_v1_api = True + +# Allow access to version 2 of glance api +#enable_v2_api = True + +# Return the URL that references where the data is stored on +# the backend storage system. For example, if using the +# file system store a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. +# The default value is false. +#show_image_direct_url = False + +# Send headers containing user and tenant information when making requests to +# the v1 glance registry. This allows the registry to function as if a user is +# authenticated without the need to authenticate a user itself using the +# auth_token middleware. +# The default value is false. 
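+# For example, uncommenting the line below lets this API node forward the
+# caller's identity to the registry configured at registry_host further
+# down, so the registry can skip authenticating the user itself.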
+#send_identity_headers = False + +# Supported values for the 'container_format' image attribute +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + +# Directory to use for lock files. Default to a temp directory +# (string value). This setting needs to be the same for both +# glance-scrubber and glance-api. +#lock_path= + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format = roles + +# Specifies how long (in hours) a task is supposed to live in the tasks DB +# after succeeding or failing before getting soft-deleted. +# The default value for task_time_to_live is 48 hours. +# task_time_to_live = 48 + +# This value sets what strategy will be used to determine the image location +# order. Currently two strategies are packaged with Glance 'location_order' +# and 'store_type'. +#location_strategy = location_order + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host = {{ internal_ip }} + +# Port the registry server is listening on +registry_port = 9191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +registry_client_protocol = http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file = /path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file = /path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. 
Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file = /path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure = False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout = 600 + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# Pass the user's token through for API requests to the registry. +# Default: True +#use_user_token = True + +# If 'use_user_token' is not in effect then admin credentials +# can be specified. Requests to the registry on behalf of +# the API will use these credentials. +# Admin user name +#admin_user = None +# Admin password +#admin_password = None +# Admin tenant name +#admin_tenant_name = None +# Keystone endpoint +#auth_url = None +# Keystone region +#auth_region = None +# Auth strategy +#auth_strategy = keystone + +# ============ Notification System Options ===================== + +# Notifications can be sent when images are create, updated or deleted. +# There are three methods of sending notifications, logging (via the +# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid +# message queue), or noop (no notifications sent, the default) +# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver` +# notifier_strategy = default + +# Driver or drivers to handle sending notifications +# notification_driver = noop + +# Default publisher_id for outgoing notifications. +# default_publisher_id = image.localhost + +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +rabbit_host = localhost +rabbit_port = 5672 +rabbit_use_ssl = false +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +qpid_notification_exchange = glance +qpid_notification_topic = notifications +qpid_hostname = localhost +qpid_port = 5672 +qpid_username = +qpid_password = +qpid_sasl_mechanisms = +qpid_reconnect_timeout = 0 +qpid_reconnect_limit = 0 +qpid_reconnect_interval_min = 0 +qpid_reconnect_interval_max = 0 +qpid_reconnect_interval = 0 +qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +qpid_protocol = tcp +qpid_tcp_nodelay = True + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +# this option has been moved to [glance_store] for Juno release +# filesystem_store_datadir = /var/lib/glance/images/ + +# A list of directories where image data can be stored. +# This option may be specified multiple times for specifying multiple store +# directories. Either one of filesystem_store_datadirs or +# filesystem_store_datadir option is required. A priority number may be given +# after each directory entry, separated by a ":". 
+# When adding an image, the highest priority directory will be selected, unless +# there is not enough space available in cases where the image size is already +# known. If no priority is given, it is assumed to be zero and the directory +# will be considered for selection last. If multiple directories have the same +# priority, then the one with the most free space available is selected. +# If same store is specified multiple times then BadStoreConfiguration +# exception will be raised. +#filesystem_store_datadirs = /var/lib/glance/images/:1 + +# A path to a JSON file that contains metadata describing the storage +# system. When show_multiple_locations is True the information in this +# file will be returned with any location that is contained in this +# store. +#filesystem_store_metadata_file = None + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# Whether to use ServiceNET to communicate with the Swift storage servers. +# (If you aren't RACKSPACE, leave this False!) +# +# To use ServiceNET for authentication, prefix hostname of +# `swift_store_auth_address` with 'snet-'. +# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ +swift_enable_snet = False + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. 
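+# Example (hypothetical region name): swift_store_region = RegionOne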
+#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). +#swift_store_ssl_compression = True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count = 0 + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = 127.0.0.1:8080/v1.0/ + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir = /path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format = subdomain + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +#rbd_store_ceph_conf = /etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +# If , a default will be chosen based on the client. section +# in rbd_store_ceph_conf +#rbd_store_user = + +# RADOS pool in which images are stored +#rbd_store_pool = images + +# RADOS images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#rbd_store_chunk_size = 8 + +# ============ Sheepdog Store Options ============================= + +sheepdog_store_address = localhost + +sheepdog_store_port = 7000 + +# Images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +sheepdog_store_chunk_size = 64 + +# ============ Cinder Store Options =============================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info = volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. 
http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file = + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries = 3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure = False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com +#vmware_server_host = + +# Server username (string value) +#vmware_server_username = + +# Server password (string value) +#vmware_server_password = + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. +#vmware_datacenter_path = + +# Datastore associated with the datacenter (string value) +#vmware_datastore_name = + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count = 10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval = 5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir = /openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure = False + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +delayed_delete = False + +# Delayed delete time in seconds +scrub_time = 43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +scrubber_datadir = /var/lib/glance/scrubber + +# =============== Quota Options ================================== + +# The maximum number of image members allowed per image +#image_member_quota = 128 + +# The maximum number of image properties allowed per image +#image_property_quota = 128 + +# The maximum number of tags allowed per image +#image_tag_quota = 128 + +# The maximum number of locations allowed per image +#image_location_quota = 10 + +# Set a system wide quota for every user. This value is the total number +# of bytes that a user can use across all storage systems. A value of +# 0 means unlimited. +#user_storage_quota = 0 + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +image_cache_dir = /var/lib/glance/image-cache/ + +# =============== Manager Options ================================= + +# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. +# Whether or not to enforce that all DB tables have charset utf8. +# If your database tables do not have charset utf8 you will +# need to convert before this option is removed. This option is +# only relevant if your database engine is MySQL. 
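+# A conversion for an existing table would look like the following (the
+# table name is only an example):
+#   ALTER TABLE images CONVERT TO CHARACTER SET utf8;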
+#db_enforce_mysql_charset = True + +# =============== Glance Store ==================================== +[glance_store] +# Moved from [DEFAULT], for Juno release +default_store = file +filesystem_store_datadir = /var/lib/glance/images/ + +# =============== Database Options ================================= + +[database] +# The file name to use with SQLite (string value) +sqlite_db = /var/lib/glance/glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = glance +admin_password = {{ GLANCE_PASS }} + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-api-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +flavor= keystone + +[store_type_location_strategy] +# The scheme list to use to get store preference order. The scheme must be +# registered by one of the stores defined by the 'known_stores' config option. +# This option will be applied when you using 'store_type' option as image +# location strategy defined by the 'location_strategy' config option. +#store_type_preference = diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf new file mode 100644 index 0000000..8d731a2 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/glance/templates/glance-registry.conf @@ -0,0 +1,190 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Address to bind the registry server +bind_host = {{ internal_ip }} + +# Port the bind the registry server to +bind_port = 9191 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/glance/registry.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package. +#data_api = glance.db.sqlalchemy.api + +# Enable Registry API versions individually or simultaneously +#enable_v1_registry = True +#enable_v2_registry = True + +# Limit the api to return `param_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. 
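+# With the values below, a client may request at most 1000 items per call
+# and receives 25 when it does not pass an explicit limit.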
+api_limit_max = 1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +limit_param_default = 25 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Whether to automatically create the database tables. +# Default: False +#db_auto_create = False + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting registry server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Database Options ========================== + +[database] +# The file name to use with SQLite (string value) +sqlite_db = /var/lib/glance/glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 
0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout =
+
+# Enable the experimental use of database reconnect on
+# connection lost (boolean value)
+#use_db_reconnect = False
+
+# seconds between db connection retries (integer value)
+#db_retry_interval = 1
+
+# Whether to increase interval between db connection retries,
+# up to db_max_retry_interval (boolean value)
+#db_inc_retry_interval = True
+
+# max seconds between db connection retries, if
+# db_inc_retry_interval is enabled (integer value)
+#db_max_retry_interval = 10
+
+# maximum db connection retries before error is raised.
+# (setting -1 implies an infinite retry count) (integer value)
+#db_max_retries = 20
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = {{ GLANCE_PASS }}
+
+[paste_deploy]
+# Name of the paste configuration file that defines the available pipelines
+#config_file = glance-registry-paste.ini
+
+# Partial name of a pipeline in your paste configuration file with the
+# service name removed. For example, if your paste section name is
+# [pipeline:glance-registry-keystone], you would configure the flavor below
+# as 'keystone'.
+flavor= keystone
diff --git a/compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh
new file mode 100644
index 0000000..9dd1fa8
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/glance/templates/image_upload.sh
@@ -0,0 +1,2 @@
+sleep 10
+glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk b/compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk
new file mode 100644
index 0000000..9fd165c
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/ha/files/galera_chk
@@ -0,0 +1,10 @@
+#! /bin/sh
+
+code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'`
+
+if [ "$code" = "1" ]
+then
+    echo "HTTP/1.1 200 OK\r\n"
+else
+    echo "HTTP/1.1 503 Service Unavailable\r\n"
+fi
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk b/compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk
new file mode 100644
index 0000000..2c03f19
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/ha/files/mysqlchk
@@ -0,0 +1,15 @@
+# default: off
+# description: An xinetd service that answers health probes by running a
+# MySQL/Galera check and returning an HTTP-style status line.
+# This is the tcp version.
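+#
+# In this deployment the service is meant to be probed over TCP on port
+# 9200: xinetd runs /usr/local/bin/galera_chk for each connection, which
+# replies "HTTP/1.1 200 OK" while Threads_running is 1 and 503 otherwise.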
+service mysqlchk +{ + disable = no + flags = REUSE + socket_type = stream + protocol = tcp + user = root + wait = no + server = /usr/local/bin/galera_chk + port = 9200 +} diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh b/compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh new file mode 100644 index 0000000..5edffe8 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/ha/files/notify.sh @@ -0,0 +1,4 @@ +#!/bin/bash +python /usr/local/bin/failover.py $1 +mysql -uroot -e"flush hosts" +service mysql restart diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml new file mode 100644 index 0000000..a02c686 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/ha/handlers/main.yml @@ -0,0 +1,9 @@ +--- +- name: restart haproxy + service: name=haproxy state=restarted enabled=yes + +- name: restart xinetd + service: name=xinetd state=restarted enabled=yes + +- name: restart keepalived + service: name=keepalived state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml new file mode 100644 index 0000000..a00c21a --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/ha/tasks/main.yml @@ -0,0 +1,94 @@ +--- +- name: install keepalived xinet haproxy + apt: name={{ item }} state=present + with_items: + - keepalived + - xinetd + - haproxy + +- name: generate ha service list + shell: echo {{ item }} >> /opt/service + with_items: + - keepalived + - xinetd + - haproxy + +- name: install pexpect + pip: name=pexpect state=present + +- name: activate ip_nonlocal_bind + sysctl: name=net.ipv4.ip_nonlocal_bind value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_intvl + sysctl: name=net.ipv4.tcp_keepalive_intvl value=1 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_probes + sysctl: name=net.ipv4.tcp_keepalive_probes value=5 + state=present reload=yes + +- name: set net.ipv4.tcp_keepalive_time + sysctl: name=net.ipv4.tcp_keepalive_time value=5 + state=present reload=yes + +- name: update haproxy cfg + template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg + notify: restart haproxy + +- name: set haproxy enable flag + lineinfile: dest=/etc/default/haproxy state=present + regexp="ENABLED=*" + line="ENABLED=1" + notify: restart haproxy + +- name: set haproxy log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local0.* /var/log/haproxy.log" + line="local0.* /var/log/haproxy.log" + +- name: set rsyslog udp module + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$ModLoad imudp" + line="$ModLoad imudp" + +- name: set rsyslog udp port + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="^#$UDPServerRun 514" + line="$UDPServerRun 514" + +- name: copy galera_chk file + copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777 + +- name: copy notify file + copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777 + +- name: copy notify template file + template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777 + +- name: add network service + lineinfile: dest=/etc/services state=present + line="mysqlchk 9200/tcp" + insertafter="Local services" + notify: restart xinetd + +- name: copy mysqlchk file + copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777 + notify: restart xinetd + +- name: set keepalived start param + lineinfile: dest=/etc/default/keepalived state=present + 
regexp="^DAEMON_ARGS=*" + line="DAEMON_ARGS=\"-D -d -S 1\"" + +- name: set keepalived log + lineinfile: dest=/etc/rsyslog.conf state=present + regexp="local1.* /var/log/keepalived.log" + line="local1.* /var/log/keepalived.log" + +- name: update keepalived info + template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf + notify: restart keepalived + +- name: restart rsyslog + shell: service rsyslog restart diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 b/compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 new file mode 100644 index 0000000..b03c737 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/ha/templates/failover.j2 @@ -0,0 +1,65 @@ +import ConfigParser, os, socket +import logging as LOG +import pxssh +import sys +import re + +LOG_FILE="/var/log/mysql_failover" +try: + os.remove(LOG_FILE) +except: + pass + +LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG) +ha_vip = {{ HA_VIP }} +LOG.info("ha_vip: %s" % ha_vip) + +#ha_vip = "10.1.0.50" +galera_path = '/etc/mysql/conf.d/wsrep.cnf' +pattern = re.compile(r"gcomm://(?P.*)") + +def ssh_get_hostname(ip): + try: + s = pxssh.pxssh() + s.login("%s" % ip, "root", "root") + s.sendline('hostname') # run a command + s.prompt() # match the prompt + result = s.before.strip() # print everything before the prompt. + return result.split(os.linesep)[1] + except pxssh.ExceptionPxssh as e: + LOG.error("pxssh failed on login.") + raise + +def failover(mode): + config = ConfigParser.ConfigParser() + config.optionxform = str + config.readfp(open(galera_path)) + wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address") + wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"] + + LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address) + + if mode == "master": + # refresh wsrep_cluster_address to null + LOG.info("I'm being master, set wsrep_cluster_address to null") + wsrep_cluster_address = "" + + elif mode == "backup": + # refresh wsrep_cluster_address to master int ip + hostname = ssh_get_hostname(ha_vip) + wsrep_cluster_address = socket.gethostbyname(hostname) + LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip") + + LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address) + wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address + config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address) + with open(galera_path, 'wb') as fp: + #config.write(sys.stdout) + config.write(fp) + + os.system("service mysql restart") + LOG.info("failover success!!!") + +if __name__ == "__main__": + LOG.debug("call me: %s" % sys.argv) + failover(sys.argv[1]) diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg new file mode 100644 index 0000000..4ed528a --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/ha/templates/haproxy.cfg @@ -0,0 +1,133 @@ + +global + #chroot /var/run/haproxy + daemon + user haproxy + group haproxy + maxconn 4000 + pidfile /var/run/haproxy/haproxy.pid + #log 127.0.0.1 local0 + tune.bufsize 1000000 + stats socket /var/run/haproxy.sock + stats timeout 2m + +defaults + log global + maxconn 8000 + option redispatch + option dontlognull + option splice-auto + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 6m + timeout server 6m + timeout check 10s + retries 5 + +listen 
+listen proxy-glance_registry_cluster
+    bind {{ HA_VIP }}:9191
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-glance_api_cluster
+    bind {{ HA_VIP }}:9292
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova-novncproxy
+    bind {{ HA_VIP }}:6080
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-network
+    bind {{ HA_VIP }}:9696
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_admin_cluster
+    bind {{ HA_VIP }}:35357
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-keystone_public_internal_cluster
+    bind {{ HA_VIP }}:5000
+    option tcpka
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_compute_api_cluster
+    bind {{ HA_VIP }}:8774
+    mode tcp
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-nova_metadata_api_cluster
+    bind {{ HA_VIP }}:8775
+    option tcpka
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen proxy-cinder_api_cluster
+    bind {{ HA_VIP }}:8776
+    mode tcp
+    option httpchk
+    option tcplog
+    balance source
+{% for host in groups['controller'] %}
+    server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
+listen stats
+    mode http
+    bind 0.0.0.0:8888
+    stats enable
+    stats refresh 30s
+    stats uri /
+    stats realm Global\ statistics
+    stats auth admin:admin
+
+
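The keepalived template that follows floats the HA VIP between the controllers: every node starts as VRRP BACKUP with nopreempt, and the expression 100 - loop.index0 * 5 derives a distinct priority from each controller's position in the group, so the first listed controller normally wins the initial election and later failovers stick. A quick helper for checking which node currently holds the VIP (illustrative sketch only; assumes HA_VIP and INTERNAL_INTERFACE as used throughout these templates):

- name: report whether this node holds the HA VIP
  shell: ip -4 addr show {{ INTERNAL_INTERFACE }} | grep -w {{ HA_VIP }} || true
  register: vip_check
  changed_when: false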
diff --git a/compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf
new file mode 100644
index 0000000..0b49137
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/ha/templates/keepalived.conf
@@ -0,0 +1,42 @@
+global_defs {
+
+    notification_email{
+        root@huawei.com
+    }
+
+    notification_email_from keepalived@huawei.com
+
+    smtp_server localhost
+
+    smtp_connect_timeout 30
+
+    router_id NodeA
+
+}
+
+vrrp_instance VI_1 {
+
+    interface {{ INTERNAL_INTERFACE }}
+    virtual_router_id 51
+    state BACKUP
+    nopreempt
+    advert_int 1
+{% for host in groups['controller'] %}
+{% if host == inventory_hostname %}
+    priority {{ 100 - loop.index0 * 5 }}
+{% endif %}
+{% endfor %}
+
+    authentication {
+        auth_type PASS
+        auth_pass 1111
+    }
+
+    virtual_ipaddress {
+        {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }}
+    }
+
+    notify_master "/usr/local/bin/notify.sh master"
+    notify_backup "/usr/local/bin/notify.sh backup"
+}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml
new file mode 100644
index 0000000..9c0084e
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart keystone
+  service: name=keystone state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml
new file mode 100644
index 0000000..3203b26
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_config.yml
@@ -0,0 +1,16 @@
+---
+- name: keystone-manage db-sync
+  shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+
+- name: place keystone init script under /opt/
+  template: src=keystone_init dest=/opt/keystone_init mode=0744
+
+- name: run keystone_init
+  shell: /opt/keystone_init && touch keystone_init_complete || (touch keystone_init_failed; exit 1)
+  args:
+    creates: keystone_init_complete
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml
new file mode 100644
index 0000000..7d92395
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/keystone_install.yml
@@ -0,0 +1,27 @@
+---
+- name: install keystone packages
+  apt: name=keystone state=present force=yes
+
+- name: generate keystone service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - keystone
+
+- name: update keystone conf
+  template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
+  notify: restart keystone
+
+- name: delete sqlite database
+  shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
+
+- name: cron job to purge expired tokens hourly
+  shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
+
+- name: modify keystone cron rights
+  file: path=/var/spool/cron/crontabs/keystone mode=0600
+
+- name: keystone source files
+  template: src={{ item }} dest=/opt/{{ item }}
+  with_items:
+    - admin-openrc.sh
+    - demo-openrc.sh
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml
new file mode 100644
index 0000000..2f36e91
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: keystone_install.yml
+  tags:
+    - install
+    - keystone_install
+    - keystone
+
+- include: keystone_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - keystone_config
+    - keystone
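Note how keystone_config.yml is included only where HA_CLUSTER is undefined or the host's HA_CLUSTER entry is empty, so db_sync and the one-shot keystone_init run on a single bootstrap controller while the install tasks run everywhere. A small task to make that gating visible during a run (illustrative sketch only, not part of the patch):

- name: show whether this node is the keystone bootstrap node
  debug: msg="bootstrap node={{ HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' }}"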
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh
new file mode 100644
index 0000000..f2e0d61
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/admin-openrc.sh
@@ -0,0 +1,6 @@
+# Verify the Identity Service installation
+export OS_PASSWORD={{ ADMIN_PASS }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0
+export OS_USERNAME=admin
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh
new file mode 100644
index 0000000..8bdc51b
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/demo-openrc.sh
@@ -0,0 +1,5 @@
+export OS_USERNAME=demo
+export OS_PASSWORD={{ DEMO_PASS }}
+export OS_TENANT_NAME=demo
+export OS_AUTH_URL=http://{{ HA_VIP }}:5000/v2.0
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf
new file mode 100644
index 0000000..c40f77f
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone.conf
@@ -0,0 +1,1317 @@
+[DEFAULT]
+
+admin_token={{ ADMIN_TOKEN }}
+
+public_bind_host= {{ identity_host }}
+
+admin_bind_host= {{ identity_host }}
+
+#compute_port=8774
+
+#admin_port=35357
+
+#public_port=5000
+
+# The base public endpoint URL for keystone that is
+# advertised to clients (NOTE: this does NOT affect how
+# keystone listens for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:5000/v2.0/users will
+# default to http://server:5000. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#public_endpoint=http://localhost:%(public_port)s/
+
+# The base admin endpoint URL for keystone that is advertised
+# to clients (NOTE: this does NOT affect how keystone listens
+# for connections) (string value).
+# Defaults to the base host URL of the request. Eg a
+# request to http://server:35357/v2.0/users will
+# default to http://server:35357. You should only need
+# to set this value if the base URL contains a path
+# (eg /prefix/v2.0) or the endpoint should be found on
+# a different server.
+#admin_endpoint=http://localhost:%(admin_port)s/
+
+# onready allows you to send a notification when the process
+# is ready to serve. For example, to have it notify using
+# systemd, one could set shell command: "onready = systemd-
+# notify --ready" or a module with notify() method: "onready =
+# keystone.common.systemd". (string value)
+#onready=
+
+# enforced by optional sizelimit middleware
+# (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# limit the sizes of user & tenant ID/names. (integer value)
+#max_param_size=64
+
+# similar to max_param_size, but provides an exception for
+# token values. (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, the member_role_id will be used in the API
+# add_user_to_project. 
(string value) +#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab + +# During a SQL upgrade member_role_id will be used to create a +# new role that will replace records in the +# user_tenant_membership table with explicit role grants. +# After migration, member_role_name will be ignored. (string +# value) +#member_role_name=_member_ + +# The value passed as the keyword "rounds" to passlib encrypt +# method. (integer value) +#crypt_strength=40000 + +# Set this to True if you want to enable TCP_KEEPALIVE on +# server sockets i.e. sockets used by the keystone wsgi server +# for client connections. (boolean value) +#tcp_keepalive=false + +# Sets the value of TCP_KEEPIDLE in seconds for each server +# socket. Only applies if tcp_keepalive is True. Not supported +# on OS X. (integer value) +#tcp_keepidle=600 + +# The maximum number of entities that will be returned in a +# collection can be set with list_limit, with no limit set by +# default. This global limit may be then overridden for a +# specific driver, by specifying a list_limit in the +# appropriate section (e.g. [assignment]). (integer value) +#list_limit= + +# Set this to false if you want to enable the ability for +# user, group and project entities to be moved between domains +# by updating their domain_id. Allowing such movement is not +# recommended if the scope of a domain admin is being +# restricted by use of an appropriate policy file (see +# policy.v3cloudsample as an example). (boolean value) +#domain_id_immutable=true + + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +#amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +#rpc_conn_pool_size=30 + +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions +# Qpid broker hostname. (string value) +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +#qpid_username= + +# Password for Qpid connection. (string value) +#qpid_password= + +# Space separated list of SASL mechanisms to use for auth. +# (string value) +#qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +#qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +#qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +#kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +#kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +#kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). 
(string value) +#kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +#kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +#rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +#rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +#rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +#rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +rabbit_userid={{ RABBIT_USER }} + +# The RabbitMQ password. (string value) +rabbit_password={{ RABBIT_PASS }} + +# the RabbitMQ login method (string value) +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +#rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer +# value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +#rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +#fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +#rpc_zmq_host=keystone + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + +# Size of RPC greenthread pool. (integer value) +#rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +#notification_driver= + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. 
(string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +#control_exchange=openstack + + +# +# Options defined in keystone.notifications +# + +# Default publisher_id for outgoing notifications (string +# value) +#default_publisher_id= + + +# +# Options defined in keystone.middleware.ec2_token +# + +# URL to get token from ec2 request. (string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Required if EC2 server requires client certificate. (string +# value) +#keystone_ec2_keyfile= + +# Client certificate key filename. Required if EC2 server +# requires client certificate. (string value) +#keystone_ec2_certfile= + +# A PEM encoded certificate authority to use when verifying +# HTTPS connections. Defaults to the system CAs. (string +# value) +#keystone_ec2_cafile= + +# Disable SSL certificate verification. (boolean value) +#keystone_ec2_insecure=false + + +# +# Options defined in keystone.openstack.common.eventlet_backdoor +# + +# Enable eventlet backdoor. Acceptable values are 0, , +# and :, where 0 results in listening on a random +# tcp port number; results in listening on the +# specified port number (and not enabling backdoor if that +# port is in use); and : results in listening on +# the smallest unused port number within the specified range +# of port numbers. The chosen port is displayed in the +# service's log file. (string value) +#backdoor_port= + + +# +# Options defined in keystone.openstack.common.lockutils +# + +# Whether to disable inter-process locks (boolean value) +#disable_process_locking=false + +# Directory to use for lock files. (string value) +#lock_path= + + +# +# Options defined in keystone.openstack.common.log +# + +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). (boolean value) +#verbose=false + +# Log output to standard error (boolean value) +#use_stderr=true + +# Format string to use for log messages with context (string +# value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context +# (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG (string +# value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format +# (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s + +# List of logger=LEVEL pairs (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN + +# Publish error events (boolean value) +#publish_errors=false + +# Make deprecations fatal (boolean value) +#fatal_deprecations=false + +# If an instance is passed with the log message, format it +# like this (string value) +#instance_format="[instance: %(uuid)s] " + +# If an instance UUID is passed with the log message, format +# it like this (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# The name of logging configuration file. 
It does not disable +# existing loggers, but just appends specified logging +# configuration to any other existing logging options. Please +# see the Python logging module documentation for details on +# logging configuration files. (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string +# which may use any of the available logging.LogRecord +# attributes. This option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file= + +# (Optional) The base directory used for relative --log-file +# paths (string value) +# Deprecated group/name - [DEFAULT]/logdir +log_dir = /var/log/keystone + +# Use syslog for logging. Existing syslog format is DEPRECATED +# during I, and then will be changed in J to honor RFC5424 +# (boolean value) +#use_syslog=false + +# (Optional) Use syslog rfc5424 format for logging. If +# enabled, will add APP-NAME (RFC5424) before the MSG part of +# the syslog message. The old format without APP-NAME is +# deprecated in I, and will be removed in J. (boolean value) +#use_syslog_rfc_format=false + +# Syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + + +# +# Options defined in keystone.openstack.common.policy +# + +# JSON file containing policy (string value) +#policy_file=policy.json + +# Rule enforced when requested rule is not found (string +# value) +#policy_default_rule=default + + +[assignment] + +# +# Options defined in keystone +# + +# Keystone Assignment backend driver. (string value) +#driver= + +# Toggle for assignment caching. This has no effect unless +# global caching is enabled. (boolean value) +#caching=true + +# TTL (in seconds) to cache assignment data. This has no +# effect unless global caching is enabled. (integer value) +#cache_time= + +# Maximum number of entities that will be returned in an +# assignment collection. (integer value) +#list_limit= + + +[auth] + +# +# Options defined in keystone +# + +# Default auth methods. (list value) +#methods=external,password,token + +# The password auth plugin module. (string value) +#password=keystone.auth.plugins.password.Password + +# The token auth plugin module. (string value) +#token=keystone.auth.plugins.token.Token + +# The external (REMOTE_USER) auth plugin module. (string +# value) +#external=keystone.auth.plugins.external.DefaultDomain + + +[cache] + +# +# Options defined in keystone +# + +# Prefix for building the configuration dictionary for the +# cache region. This should not need to be changed unless +# there is another dogpile.cache region with the same +# configuration name. (string value) +#config_prefix=cache.keystone + +# Default TTL, in seconds, for any cached item in the +# dogpile.cache region. This applies to any cached method that +# doesn't have an explicit cache expiration time defined for +# it. (integer value) +#expiration_time=600 + +# Dogpile.cache backend module. It is recommended that +# Memcache (dogpile.cache.memcache) or Redis +# (dogpile.cache.redis) be used in production deployments. +# Small workloads (single process) like devstack can use the +# dogpile.cache.memory backend. 
(string value) +#backend=keystone.common.cache.noop + +# Use a key-mangling function (sha1) to ensure fixed length +# cache-keys. This is toggle-able for debugging purposes, it +# is highly recommended to always leave this set to True. +# (boolean value) +#use_key_mangler=true + +# Arguments supplied to the backend module. Specify this +# option once per argument to be passed to the dogpile.cache +# backend. Example format: ":". (multi valued) +#backend_argument= + +# Proxy Classes to import that will affect the way the +# dogpile.cache backend functions. See the dogpile.cache +# documentation on changing-backend-behavior. Comma delimited +# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2. +# (list value) +#proxies= + +# Global toggle for all caching using the should_cache_fn +# mechanism. (boolean value) +#enabled=false + +# Extra debugging from the cache backend (cache keys, +# get/set/delete/etc calls) This is only really useful if you +# need to see the specific cache-backend get/set/delete calls +# with the keys/values. Typically this should be left set to +# False. (boolean value) +#debug_cache_backend=false + + +[catalog] + +# +# Options defined in keystone +# + +# Catalog template file name for use with the template catalog +# backend. (string value) +#template_file=default_catalog.templates + +# Keystone catalog backend driver. (string value) +#driver=keystone.catalog.backends.sql.Catalog + +# Maximum number of entities that will be returned in a +# catalog collection. (integer value) +#list_limit= + + +[credential] + +# +# Options defined in keystone +# + +# Keystone Credential backend driver. (string value) +#driver=keystone.credential.backends.sql.Credential + + +[database] + +# +# Options defined in keystone.openstack.common.db.options +# + +# The file name to use with SQLite (string value) +#sqlite_db=keystone.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous=true + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection= +connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum db connection retries during startup. 
(setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect=false + +# seconds between db connection retries (integer value) +#db_retry_interval=1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval=true + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval=10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries=20 + + +[ec2] + +# +# Options defined in keystone +# + +# Keystone EC2Credential backend driver. (string value) +#driver=keystone.contrib.ec2.backends.kvs.Ec2 + + +[endpoint_filter] + +# +# Options defined in keystone +# + +# Keystone Endpoint Filter backend driver (string value) +#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter + +# Toggle to return all active endpoints if no filter exists. +# (boolean value) +#return_all_endpoints_if_no_filter=true + + +[federation] + +# +# Options defined in keystone +# + +# Keystone Federation backend driver. (string value) +#driver=keystone.contrib.federation.backends.sql.Federation + +# Value to be used when filtering assertion parameters from +# the environment. (string value) +#assertion_prefix= + + +[identity] + +# +# Options defined in keystone +# + +# This references the domain to use for all Identity API v2 +# requests (which are not aware of domains). A domain with +# this ID will be created for you by keystone-manage db_sync +# in migration 008. The domain referenced by this ID cannot +# be deleted on the v3 API, to prevent accidentally breaking +# the v2 API. There is nothing special about this domain, +# other than the fact that it must exist to order to maintain +# support for your v2 clients. (string value) +#default_domain_id=default + +# A subset (or all) of domains can have their own identity +# driver, each with their own partial configuration file in a +# domain configuration directory. Only values specific to the +# domain need to be placed in the domain specific +# configuration file. This feature is disabled by default; set +# to True to enable. (boolean value) +#domain_specific_drivers_enabled=false + +# Path for Keystone to locate the domain specificidentity +# configuration files if domain_specific_drivers_enabled is +# set to true. 
(string value) +#domain_config_dir=/etc/keystone/domains + +# Keystone Identity backend driver. (string value) +#driver=keystone.identity.backends.sql.Identity + +# Maximum supported length for user passwords; decrease to +# improve performance. (integer value) +#max_password_length=4096 + +# Maximum number of entities that will be returned in an +# identity collection. (integer value) +#list_limit= + + +[kvs] + +# +# Options defined in keystone +# + +# Extra dogpile.cache backend modules to register with the +# dogpile.cache library. (list value) +#backends= + +# Prefix for building the configuration dictionary for the KVS +# region. This should not need to be changed unless there is +# another dogpile.cache region with the same configuration +# name. (string value) +#config_prefix=keystone.kvs + +# Toggle to disable using a key-mangling function to ensure +# fixed length keys. This is toggle-able for debugging +# purposes, it is highly recommended to always leave this set +# to True. (boolean value) +#enable_key_mangler=true + +# Default lock timeout for distributed locking. (integer +# value) +#default_lock_timeout=5 + + +[ldap] + +# +# Options defined in keystone +# + +# URL for connecting to the LDAP server. (string value) +#url=ldap://localhost + +# User BindDN to query the LDAP server. (string value) +#user= + +# Password for the BindDN to query the LDAP server. (string +# value) +#password= + +# LDAP server suffix (string value) +#suffix=cn=example,cn=com + +# If true, will add a dummy member to groups. This is required +# if the objectclass for groups requires the "member" +# attribute. (boolean value) +#use_dumb_member=false + +# DN of the "dummy member" to use when "use_dumb_member" is +# enabled. (string value) +#dumb_member=cn=dumb,dc=nonexistent + +# allow deleting subtrees. (boolean value) +#allow_subtree_delete=false + +# The LDAP scope for queries, this can be either "one" +# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). +# (string value) +#query_scope=one + +# Maximum results per page; a value of zero ("0") disables +# paging. (integer value) +#page_size=0 + +# The LDAP dereferencing option for queries. This can be +# either "never", "searching", "always", "finding" or +# "default". The "default" option falls back to using default +# dereferencing configured by your ldap.conf. (string value) +#alias_dereferencing=default + +# Override the system's default referral chasing behavior for +# queries. (boolean value) +#chase_referrals= + +# Search base for users. (string value) +#user_tree_dn= + +# LDAP search filter for users. (string value) +#user_filter= + +# LDAP objectClass for users. (string value) +#user_objectclass=inetOrgPerson + +# LDAP attribute mapped to user id. (string value) +#user_id_attribute=cn + +# LDAP attribute mapped to user name. (string value) +#user_name_attribute=sn + +# LDAP attribute mapped to user email. (string value) +#user_mail_attribute=email + +# LDAP attribute mapped to password. (string value) +#user_pass_attribute=userPassword + +# LDAP attribute mapped to user enabled flag. (string value) +#user_enabled_attribute=enabled + +# Bitmask integer to indicate the bit that the enabled value +# is stored in if the LDAP server represents "enabled" as a +# bit on an integer rather than a boolean. A value of "0" +# indicates the mask is not used. If this is not set to "0" +# the typical value is "2". This is typically used when +# "user_enabled_attribute = userAccountControl". 
(integer +# value) +#user_enabled_mask=0 + +# Default value to enable users. This should match an +# appropriate int value if the LDAP server uses non-boolean +# (bitmask) values to indicate if a user is enabled or +# disabled. If this is not set to "True"the typical value is +# "512". This is typically used when "user_enabled_attribute = +# userAccountControl". (string value) +#user_enabled_default=True + +# List of attributes stripped off the user on update. (list +# value) +#user_attribute_ignore=default_project_id,tenants + +# LDAP attribute mapped to default_project_id for users. +# (string value) +#user_default_project_id_attribute= + +# Allow user creation in LDAP backend. (boolean value) +#user_allow_create=true + +# Allow user updates in LDAP backend. (boolean value) +#user_allow_update=true + +# Allow user deletion in LDAP backend. (boolean value) +#user_allow_delete=true + +# If True, Keystone uses an alternative method to determine if +# a user is enabled or not by checking if they are a member of +# the "user_enabled_emulation_dn" group. (boolean value) +#user_enabled_emulation=false + +# DN of the group entry to hold enabled users when using +# enabled emulation. (string value) +#user_enabled_emulation_dn= + +# List of additional LDAP attributes used for mapping +# Additional attribute mappings for users. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#user_additional_attribute_mapping= + +# Search base for projects (string value) +#tenant_tree_dn= + +# LDAP search filter for projects. (string value) +#tenant_filter= + +# LDAP objectClass for projects. (string value) +#tenant_objectclass=groupOfNames + +# LDAP attribute mapped to project id. (string value) +#tenant_id_attribute=cn + +# LDAP attribute mapped to project membership for user. +# (string value) +#tenant_member_attribute=member + +# LDAP attribute mapped to project name. (string value) +#tenant_name_attribute=ou + +# LDAP attribute mapped to project description. (string value) +#tenant_desc_attribute=description + +# LDAP attribute mapped to project enabled. (string value) +#tenant_enabled_attribute=enabled + +# LDAP attribute mapped to project domain_id. (string value) +#tenant_domain_id_attribute=businessCategory + +# List of attributes stripped off the project on update. (list +# value) +#tenant_attribute_ignore= + +# Allow tenant creation in LDAP backend. (boolean value) +#tenant_allow_create=true + +# Allow tenant update in LDAP backend. (boolean value) +#tenant_allow_update=true + +# Allow tenant deletion in LDAP backend. (boolean value) +#tenant_allow_delete=true + +# If True, Keystone uses an alternative method to determine if +# a project is enabled or not by checking if they are a member +# of the "tenant_enabled_emulation_dn" group. (boolean value) +#tenant_enabled_emulation=false + +# DN of the group entry to hold enabled projects when using +# enabled emulation. (string value) +#tenant_enabled_emulation_dn= + +# Additional attribute mappings for projects. Attribute +# mapping format is :, where ldap_attr +# is the attribute in the LDAP entry and user_attr is the +# Identity API attribute. (list value) +#tenant_additional_attribute_mapping= + +# Search base for roles. (string value) +#role_tree_dn= + +# LDAP search filter for roles. (string value) +#role_filter= + +# LDAP objectClass for roles. (string value) +#role_objectclass=organizationalRole + +# LDAP attribute mapped to role id. 
(string value) +#role_id_attribute=cn + +# LDAP attribute mapped to role name. (string value) +#role_name_attribute=ou + +# LDAP attribute mapped to role membership. (string value) +#role_member_attribute=roleOccupant + +# List of attributes stripped off the role on update. (list +# value) +#role_attribute_ignore= + +# Allow role creation in LDAP backend. (boolean value) +#role_allow_create=true + +# Allow role update in LDAP backend. (boolean value) +#role_allow_update=true + +# Allow role deletion in LDAP backend. (boolean value) +#role_allow_delete=true + +# Additional attribute mappings for roles. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#role_additional_attribute_mapping= + +# Search base for groups. (string value) +#group_tree_dn= + +# LDAP search filter for groups. (string value) +#group_filter= + +# LDAP objectClass for groups. (string value) +#group_objectclass=groupOfNames + +# LDAP attribute mapped to group id. (string value) +#group_id_attribute=cn + +# LDAP attribute mapped to group name. (string value) +#group_name_attribute=ou + +# LDAP attribute mapped to show group membership. (string +# value) +#group_member_attribute=member + +# LDAP attribute mapped to group description. (string value) +#group_desc_attribute=description + +# List of attributes stripped off the group on update. (list +# value) +#group_attribute_ignore= + +# Allow group creation in LDAP backend. (boolean value) +#group_allow_create=true + +# Allow group update in LDAP backend. (boolean value) +#group_allow_update=true + +# Allow group deletion in LDAP backend. (boolean value) +#group_allow_delete=true + +# Additional attribute mappings for groups. Attribute mapping +# format is :, where ldap_attr is the +# attribute in the LDAP entry and user_attr is the Identity +# API attribute. (list value) +#group_additional_attribute_mapping= + +# CA certificate file path for communicating with LDAP +# servers. (string value) +#tls_cacertfile= + +# CA certificate directory path for communicating with LDAP +# servers. (string value) +#tls_cacertdir= + +# Enable TLS for communicating with LDAP servers. (boolean +# value) +#use_tls=false + +# valid options for tls_req_cert are demand, never, and allow. +# (string value) +#tls_req_cert=demand + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[memcache] + +# +# Options defined in keystone +# + +# Memcache servers in the format of "host:port" (list value) +#servers=localhost:11211 + +# Number of compare-and-set attempts to make when using +# compare-and-set in the token memcache back end. (integer +# value) +#max_compare_and_set_retry=16 + + +[oauth1] + +# +# Options defined in keystone +# + +# Keystone Credential backend driver. (string value) +#driver=keystone.contrib.oauth1.backends.sql.OAuth1 + +# Duration (in seconds) for the OAuth Request Token. (integer +# value) +#request_token_duration=28800 + +# Duration (in seconds) for the OAuth Access Token. (integer +# value) +#access_token_duration=86400 + + +[os_inherit] + +# +# Options defined in keystone +# + +# role-assignment inheritance to projects from owning domain +# can be optionally enabled. 
(boolean value) +#enabled=false + + +[paste_deploy] + +# +# Options defined in keystone +# + +# Name of the paste configuration file that defines the +# available pipelines. (string value) +#config_file=keystone-paste.ini + + +[policy] + +# +# Options defined in keystone +# + +# Keystone Policy backend driver. (string value) +#driver=keystone.policy.backends.sql.Policy + +# Maximum number of entities that will be returned in a policy +# collection. (integer value) +#list_limit= + + +[revoke] + +# +# Options defined in keystone +# + +# An implementation of the backend for persisting revocation +# events. (string value) +#driver=keystone.contrib.revoke.backends.kvs.Revoke + +# This value (calculated in seconds) is added to token +# expiration before a revocation event may be removed from the +# backend. (integer value) +#expiration_buffer=1800 + +# Toggle for revocation event cacheing. This has no effect +# unless global caching is enabled. (boolean value) +#caching=true + + +[signing] + +# +# Options defined in keystone +# + +# Deprecated in favor of provider in the [token] section. +# (string value) +#token_format= + +# Path of the certfile for token signing. (string value) +#certfile=/etc/keystone/ssl/certs/signing_cert.pem + +# Path of the keyfile for token signing. (string value) +#keyfile=/etc/keystone/ssl/private/signing_key.pem + +# Path of the CA for token signing. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA Key for token signing. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Key Size (in bits) for token signing cert (auto generated +# certificate). (integer value) +#key_size=2048 + +# Day the token signing cert is valid for (auto generated +# certificate). (integer value) +#valid_days=3650 + +# Certificate Subject (auto generated certificate) for token +# signing. (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com + + +[ssl] + +# +# Options defined in keystone +# + +# Toggle for SSL support on the keystone eventlet servers. +# (boolean value) +#enable=false + +# Path of the certfile for SSL. (string value) +#certfile=/etc/keystone/ssl/certs/keystone.pem + +# Path of the keyfile for SSL. (string value) +#keyfile=/etc/keystone/ssl/private/keystonekey.pem + +# Path of the ca cert file for SSL. (string value) +#ca_certs=/etc/keystone/ssl/certs/ca.pem + +# Path of the CA key file for SSL. (string value) +#ca_key=/etc/keystone/ssl/private/cakey.pem + +# Require client certificate. (boolean value) +#cert_required=false + +# SSL Key Length (in bits) (auto generated certificate). +# (integer value) +#key_size=1024 + +# Days the certificate is valid for once signed (auto +# generated certificate). (integer value) +#valid_days=3650 + +# SSL Certificate Subject (auto generated certificate). +# (string value) +#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost + + +[stats] + +# +# Options defined in keystone +# + +# Keystone stats backend driver. (string value) +#driver=keystone.contrib.stats.backends.kvs.Stats + + +[token] + +# +# Options defined in keystone +# + +# External auth mechanisms that should add bind information to +# token e.g. kerberos, x509. (list value) +#bind= + +# Enforcement policy on tokens presented to keystone with bind +# information. One of disabled, permissive, strict, required +# or a specifically required bind mode e.g. kerberos or x509 +# to require binding to that authentication. 
(string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds).
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# "keystone.token.providers.[pki|uuid].Provider". (string
+# value)
+provider=keystone.token.providers.uuid.Provider
+
+# Keystone Token persistence backend driver. (string value)
+driver=keystone.token.persistence.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list and the revocation events
+# if revoke extension is enabled (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=
+
+# Revoke token by token identifier. Setting revoke_by_id to
+# True enables various forms of enumerating tokens, e.g. `list
+# tokens for user`. These enumerations are processed to
+# determine the list of tokens to revoke. Only disable if
+# you are switching to using the Revoke extension with a
+# backend other than KVS, which stores events in memory.
+# (boolean value)
+#revoke_by_id=true
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# delegation and impersonation features can be optionally
+# disabled. (boolean value)
+#enabled=true
+
+# Keystone Trust backend driver. (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
+[extra_headers]
+Distribution = Ubuntu
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init
new file mode 100644
index 0000000..729669b
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/keystone/templates/keystone_init
@@ -0,0 +1,42 @@
+# create an administrative user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
+
+# create a normal user
+
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=DEMO_EMAIL
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
+
+# create a service tenant
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
+
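Each registration block below follows the same two-step pattern: a service-create, then an endpoint-create that recovers the service UUID by piping service-list through awk. The same lookup is handy on its own when debugging a partially completed init run; a sketch reusing this role's existing variables (illustrative, mirroring the command embedded in the script):

- name: look up the identity service id as keystone_init does
  shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}'
  register: identity_service_id
  changed_when: false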
+# register keystone
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0
+
+# Create a glance user that the Image Service can use to authenticate with the Identity service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
+
+# Register the Image Service with the Identity service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292
+
+# Create a nova user that Compute uses to authenticate with the Identity Service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
+
+# register Compute with the Identity Service so that other OpenStack services can locate it
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s
+
+# register neutron user, role and service
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
+keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696
diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh 
b/compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh new file mode 100644 index 0000000..d309673 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/monitor/files/check_service.sh @@ -0,0 +1,7 @@ +#!/bin/bash +services=`cat /opt/service | uniq` +for service in $services; do + if [ `/sbin/initctl list|awk '/stop\/waiting/{print $1}'|uniq | grep $service` ]; then + /sbin/start $service + fi +done diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/files/root b/compass/deploy/ansible/openstack_juno/roles/monitor/files/root new file mode 100644 index 0000000..9c55c4f --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/monitor/files/root @@ -0,0 +1 @@ +* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1 diff --git a/compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml new file mode 100644 index 0000000..e5b93f3 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/monitor/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: copy service check file + copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777 + +- name: copy cron file + copy: src=root dest=/var/spool/cron/crontabs/root mode=0600 + +- name: restart cron + service: name=cron state=restarted + + diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml new file mode 100644 index 0000000..4ae4065 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- include: rabbitmq.yml + +#- include: rabbitmq_cluster.yml +# when: HA_CLUSTER is defined diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml new file mode 100644 index 0000000..5714406 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq.yml @@ -0,0 +1,45 @@ +--- +- name: create rabbitmq directory + file: path=/etc/rabbitmq state=directory mode=0755 + +- name: copy rabbitmq config file + template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755 + +- name: install rabbitmq-server + apt: name=rabbitmq-server state=present + +- name: stop rabbitmq-server + service: name=rabbitmq-server + state=stopped + +- name: update .erlang.cookie + template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie + group=rabbitmq + owner=rabbitmq + mode=0400 + when: ERLANG_TOKEN is defined + +- name: start and enable rabbitmq-server + service: name=rabbitmq-server + state=started + enabled=yes + +- name: generate mq service list + shell: echo {{ item }} >> /opt/service + with_items: + - rabbitmq-server + +- name: modify rabbitmq password + command: rabbitmqctl change_password guest {{ RABBIT_PASS }} + when: "RABBIT_USER is defined and RABBIT_USER == 'guest'" + ignore_errors: True + +- name: add rabbitmq user + command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }} + when: "RABBIT_USER is defined and RABBIT_USER != 'guest'" + ignore_errors: True + +- name: set rabbitmq user permission + command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*" + when: "RABBIT_USER is defined and RABBIT_USER != 'guest'" + diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml new file mode 100644 index 0000000..afd4c77 --- /dev/null 
+++ b/compass/deploy/ansible/openstack_juno/roles/mq/tasks/rabbitmq_cluster.yml @@ -0,0 +1,27 @@ +--- +- name: stop rabbitmq app + command: rabbitmqctl stop_app + when: HA_CLUSTER[inventory_hostname] != '' + +- name: rabbitmqctl reset + command: rabbitmqctl reset + when: HA_CLUSTER[inventory_hostname] != '' + +- name: stop rabbitmq + shell: rabbitmqctl stop + +- name: set detach + shell: rabbitmq-server -detached + +- name: join cluster + command: rabbitmqctl join_cluster rabbit@{{ item }} + when: item != inventory_hostname and HA_CLUSTER[item] == '' + with_items: + groups['controller'] + +- name: start rabbitmq app + command: rabbitmqctl start_app + +- name: set the HA policy + rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all" + diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie b/compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie new file mode 100644 index 0000000..cadcfaf --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/mq/templates/.erlang.cookie @@ -0,0 +1 @@ +{{ ERLANG_TOKEN }} diff --git a/compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf new file mode 100644 index 0000000..6dd7349 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/mq/templates/rabbitmq-env.conf @@ -0,0 +1 @@ +RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }} diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml new file mode 100644 index 0000000..36d779d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-common/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: restart neutron-plugin-openvswitch-agent + service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron-l3-agent + service: name=neutron-l3-agent state=restarted enabled=yes + +- name: restart neutron-dhcp-agent + service: name=neutron-dhcp-agent state=restarted enabled=yes + +- name: restart neutron-metadata-agent + service: name=neutron-metadata-agent state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml new file mode 100644 index 0000000..825178b --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/defaults/main.yml @@ -0,0 +1,2 @@ +--- +neutron_ovs_bridge_mappings: "" diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml new file mode 100644 index 0000000..36d779d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/handlers/main.yml @@ -0,0 +1,13 @@ +--- +- name: restart neutron-plugin-openvswitch-agent + service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron-l3-agent + service: name=neutron-l3-agent state=restarted enabled=yes + +- name: restart neutron-dhcp-agent + service: name=neutron-dhcp-agent state=restarted enabled=yes + +- name: restart neutron-metadata-agent + service: name=neutron-metadata-agent state=restarted enabled=yes diff --git 
a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml new file mode 100644 index 0000000..93ee46f --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/tasks/main.yml @@ -0,0 +1,55 @@ +--- + +- name: activate ipv4 forwarding + sysctl: name=net.ipv4.ip_forward value=1 + state=present reload=yes + +- name: deactivate ipv4 rp filter + sysctl: name=net.ipv4.conf.all.rp_filter value=0 + state=present reload=yes + +- name: deactivate ipv4 default rp filter + sysctl: name=net.ipv4.conf.default.rp_filter + value=0 state=present reload=yes + +- name: install compute-related neutron packages + apt: name={{ item }} state=present force=yes + with_items: + - neutron-common + - neutron-plugin-ml2 + - openvswitch-datapath-dkms + - openvswitch-switch + +- name: generate neutron compute service list + shell: echo {{ item }} >> /opt/service + with_items: + - neutron-plugin-openvswitch-agent + +- name: install neutron openvswitch agent + apt: name=neutron-plugin-openvswitch-agent + state=present force=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: config neutron + template: src=neutron-network.conf + dest=/etc/neutron/neutron.conf backup=yes + notify: + - restart neutron-plugin-openvswitch-agent + +- name: config ml2 plugin + template: src=ml2_conf.ini + dest=/etc/neutron/plugins/ml2/ml2_conf.ini + backup=yes + notify: + - restart neutron-plugin-openvswitch-agent + +- name: add br-int + openvswitch_bridge: bridge=br-int state=present + notify: + - restart neutron-plugin-openvswitch-agent + - restart nova-compute + +- include: ../../neutron-network/tasks/odl.yml + when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- meta: flush_handlers diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini new file mode 100644 index 0000000..19eb62e --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dhcp_agent.ini @@ -0,0 +1,90 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks.
Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = +dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. 
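+# (A heavily loaded hypervisor can exceed the default; raising it, for
+# example to 30, is a common local tweak rather than an upstream default.)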
+# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf new file mode 100644 index 0000000..7bcbd9d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/dnsmasq-neutron.conf @@ -0,0 +1,2 @@ +dhcp-option-force=26,1454 + diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot new file mode 100644 index 0000000..32caf96 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/etc/xorp/config.boot @@ -0,0 +1,25 @@ +interfaces { + restore-original-config-on-shutdown: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + description: "Internal pNodes interface" + disable: false + default-system-config + } +} + +protocols { + igmp { + disable: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + disable: false + version: 3 + } + } + traceoptions { + flag all { + disable: false + } + } + } +} diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini new file mode 100644 index 0000000..b394c00 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/l3_agent.ini @@ -0,0 +1,81 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. 
This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. +send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini new file mode 100644 index 0000000..6badf28 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/metadata_agent.ini @@ -0,0 +1,46 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = True + +# The Neutron user information for accessing the Neutron API. +auth_url = http://{{ HA_VIP }}:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +nova_metadata_ip = {{ HA_VIP }} + +# TCP Port used by Nova metadata server +nova_metadata_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +metadata_proxy_shared_secret = {{ METADATA_SECRET }} + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server +# metadata_workers = 0 + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 128 + +# URL to connect to the cache backend. 
+# Example of URL using memory caching backend +# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5 +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. +# cache_url = diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini new file mode 100644 index 0000000..a790069 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/ml2_conf.ini @@ -0,0 +1,108 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan +type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan +tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade +mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. +# +flat_networks = external +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of [::] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +tunnel_id_ranges = 1:1000 + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +vni_ranges = 1001:4095 + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode. +# +vxlan_group = 239.1.1.1 +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. 
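+# (Explanatory note, not upstream text: with the ML2/OVS agent the two
+# settings below act as a pair; the hybrid driver applies security group
+# rules with iptables on a per-port Linux bridge, because rules cannot be
+# attached to OVS ports directly.)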
+# enable_security_group = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +enable_security_group = True + +[database] +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 + +[ovs] +local_ip = {{ internal_ip }} +{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} +integration_bridge = br-int +tunnel_bridge = br-tun +tunnel_id_ranges = 1001:4095 +tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} +{% endif %} + +[agent] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf +tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} +vxlan_udp_port = 4789 +{% endif %} +l2_population = False + +[odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +network_vlan_ranges = 1001:4095 +tunnel_id_ranges = 1001:4095 +tun_peer_patch_port = patch-int +int_peer_patch_port = patch-tun +tenant_network_type = vxlan +tunnel_bridge = br-tun +integration_bridge = br-int +controllers = 10.1.0.15:8080:admin:admin +{% endif %} + +[ml2_odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +username = {{ odl_username }} +password = {{ odl_password }} +url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron +{% endif %} + diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf new file mode 100644 index 0000000..93be9cb --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron-network.conf @@ -0,0 +1,465 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ DEBUG }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port the bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. 
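+# (Illustration: the entrypoint names are declared under
+# entry_points/neutron.core_plugins in neutron's setup.cfg, e.g.
+# ml2 = neutron.plugins.ml2.plugin:Ml2Plugin)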
+# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4h octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. 
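+# (A dedicated vhost, not configured by this role, would first need creating
+# on the broker, e.g. rabbitmqctl add_vhost /neutron followed by
+# rabbitmqctl set_permissions -p /neutron {{ RABBIT_USER }} ".*" ".*" ".*";
+# this template keeps the default "/".)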
+# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. 
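+# (Clarifying note: upstream reads value == 'infinite'; the literal string
+# 'infinite' or any negative value disables the cap, e.g. a deployment
+# wanting a hard cap might set pagination_max_limit = 1000.)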
+# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. 
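+# (Nova's VIF-plugging handshake at instance boot relies on these events;
+# disabling them on Juno typically shows up as slow builds or plug timeouts.
+# Explanatory note, not upstream text.)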
+notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. +nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. 
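+# (rootwrap only permits commands whitelisted by the filter files under
+# /etc/neutron/rootwrap.d/; a bare "sudo" bypasses that check entirely.)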
+# Change to "sudo" to skip the filtering and just run the comand directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=::[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of and must be unique; must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. 
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf new file mode 100644 index 0000000..1575367 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron.conf @@ -0,0 +1,466 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ DEBUG }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port to bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated.
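+# (Clarifying note: with the 4-octet form below, every generated MAC keeps
+# the fixed prefix fa:16:3e:4f and only the last two octets vary.)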
+# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant +nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. 
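+# (This is Keystone's admin endpoint; port 35357 below matches the
+# identity_uri configured in [keystone_authtoken] later in this file.)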
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the comand directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) 
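+# (A filled-in MySQL example, with placeholder host and password, purely for
+# illustration: connection = mysql://neutron:NEUTRON_DBPASS@10.1.0.50/neutron)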
+# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=::[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of and must be unique; must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. 
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf
new file mode 100644
index 0000000..4988cb0
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-compute/templates/nova.conf
@@ -0,0 +1,73 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal = false
+vif_plugging_timeout = 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[conductor]
+manager = nova.conductor.manager.ConductorManager
+topic = conductor
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml
b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml
new file mode 100644
index 0000000..b4c1585
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/handlers/main.yml
@@ -0,0 +1,24 @@
+---
+- name: restart nova-api
+  service: name=nova-api state=restarted enabled=yes
+
+- name: restart nova-cert
+  service: name=nova-cert state=restarted enabled=yes
+
+- name: restart nova-consoleauth
+  service: name=nova-consoleauth state=restarted enabled=yes
+
+- name: restart nova-scheduler
+  service: name=nova-scheduler state=restarted enabled=yes
+
+- name: restart nova-conductor
+  service: name=nova-conductor state=restarted enabled=yes
+
+- name: restart nova-novncproxy
+  service: name=nova-novncproxy state=restarted enabled=yes
+
+- name: remove nova-sqlite-db
+  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
+
+- name: restart neutron-server
+  service: name=neutron-server state=restarted enabled=yes
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml
new file mode 100644
index 0000000..9c04d74
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- include: neutron_install.yml
+  tags:
+    - install
+    - neutron_install
+    - neutron
+
+- include: neutron_config.yml
+  when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == ''
+  tags:
+    - config
+    - neutron_config
+    - neutron
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml
new file mode 100644
index 0000000..77cc29a
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_config.yml
@@ -0,0 +1,10 @@
+---
+- name: neutron-db-manage upgrade to Juno
+  shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+  register: result
+  until: result.rc == 0
+  retries: 5
+  delay: 3
+  notify:
+    - restart neutron-server
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml
new file mode 100644
index 0000000..6165299
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/tasks/neutron_install.yml
@@ -0,0 +1,29 @@
+---
+- name: install controller-related neutron packages
+  apt: name={{ item }} state=present force=yes
+  with_items:
+    - neutron-server
+    - neutron-plugin-ml2
+
+- name: generate neutron control service list
+  shell: echo {{ item }} >> /opt/service
+  with_items:
+    - neutron-server
+    - neutron-plugin-ml2
+
+- name: get tenant id to fill neutron.conf
+  shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
+  register: NOVA_ADMIN_TENANT_ID
+
+- name: update neutron conf
+  template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
+  notify:
+    - restart neutron-server
+
+- name: update ml2 plugin conf
+  template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
+  notify:
+    - restart neutron-server
+
+- meta: flush_handlers
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini
b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini new file mode 100644 index 0000000..19eb62e --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dhcp_agent.ini @@ -0,0 +1,90 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = +dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. 
+# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf new file mode 100644 index 0000000..7bcbd9d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/dnsmasq-neutron.conf @@ -0,0 +1,2 @@ +dhcp-option-force=26,1454 + diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot new file mode 100644 index 0000000..32caf96 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/etc/xorp/config.boot @@ -0,0 +1,25 @@ +interfaces { + restore-original-config-on-shutdown: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + description: "Internal pNodes interface" + disable: false + default-system-config + } +} + +protocols { + igmp { + disable: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + disable: false + version: 3 + } + } + traceoptions { + flag all { + disable: false + } + } + } +} diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini new file mode 100644 index 0000000..b394c00 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/l3_agent.ini @@ -0,0 +1,81 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. 
To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. +send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini new file mode 100644 index 0000000..6badf28 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/metadata_agent.ini @@ -0,0 +1,46 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = True + +# The Neutron user information for accessing the Neutron API. +auth_url = http://{{ HA_VIP }}:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +nova_metadata_ip = {{ HA_VIP }} + +# TCP Port used by Nova metadata server +nova_metadata_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. 
NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group. +# enable_security_group = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +enable_security_group = True + +[database] +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 + +[ovs] +local_ip = {{ internal_ip }} +{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} +integration_bridge = br-int +tunnel_bridge = br-tun +tunnel_id_ranges = 1001:4095 +tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} +{% endif %} + +[agent] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf +tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} +vxlan_udp_port = 4789 +{% endif %} +l2_population = False + +[odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +network_vlan_ranges = 1001:4095 +tunnel_id_ranges = 1001:4095 +tun_peer_patch_port = patch-int +int_peer_patch_port = patch-tun +tenant_network_type = vxlan +tunnel_bridge = br-tun +integration_bridge = br-int +controllers = 10.1.0.15:8080:admin:admin +{% endif %} + +[ml2_odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +username = {{ odl_username }} +password = {{ odl_password }} +url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron +{% endif %} + diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf new file mode 100644 index 0000000..93be9cb --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron-network.conf @@ -0,0 +1,465 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ DEBUG }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port the bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. 
+# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4h octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. 
+# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. 
+# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. 
+notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. +nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. 
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf
new file mode 100644
index 0000000..2a66e94
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not use_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant +nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. 
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the comand directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) 
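+# On the controller the active connection line below is rendered by
+# Ansible/Jinja2 from the NEUTRON_DBPASS and db_host variables; a sketch of
+# the rendered form (with placeholder values, not shipped defaults):
+# connection = mysql://neutron:<password>@<db-host>/neutron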
+# connection = sqlite:////var/lib/neutron/neutron.sqlite
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
+
+# The SQLAlchemy connection string used to connect to the slave database
+slave_connection =
+
+# Database reconnection retry times - in event connectivity is lost
+# set to -1 implies an infinite retry count
+max_retries = 10
+
+# Database reconnection interval in seconds - if the initial connection to the
+# database fails
+retry_interval = 10
+
+# Minimum number of SQL connections to keep open in a pool
+min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool
+max_pool_size = 100
+
+# Timeout in seconds before idle sql connections are reaped
+idle_timeout = 3600
+
+# If set, use this value for max_overflow with sqlalchemy
+max_overflow = 100
+
+# Verbosity of SQL debugging information. 0=None, 100=Everything
+connection_debug = 0
+
+# Add python stack traces to SQL as comment strings
+connection_trace = False
+
+# If set, use this value for pool_timeout with sqlalchemy
+pool_timeout = 10
+
+[service_providers]
+# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option, example for default provider:
+# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
+# example of non-default provider:
+# service_provider=FIREWALL:name2:firewall_driver_path
+# --- Reference implementations ---
+service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+# In order to activate Radware's lbaas driver you need to uncomment the next line.
+# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
+# Otherwise comment the HA Proxy line
+# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
+# uncomment the following line to make the 'netscaler' LBaaS provider available.
+# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
+# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh
new file mode 100644
index 0000000..b92e202
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/neutron_init.sh
@@ -0,0 +1,4 @@
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
+
+# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf
new file mode 100644
index 0000000..9587073
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-controller/templates/nova.conf
@@ -0,0 +1,69 @@
+[DEFAULT]
+dhcpbridge_flagfile=/etc/nova/nova.conf
+dhcpbridge=/usr/bin/nova-dhcpbridge
+logdir=/var/log/nova
+state_path=/var/lib/nova
+lock_path=/var/lock/nova
+force_dhcp_release=True
+iscsi_helper=tgtadm
+libvirt_use_virtio_for_bridges=True
+connection_type=libvirt
+root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+verbose={{ VERBOSE }}
+debug={{ DEBUG }}
+ec2_private_dns_show_ip=True
+api_paste_config=/etc/nova/api-paste.ini
+volumes_path=/var/lib/nova/volumes
+enabled_apis=ec2,osapi_compute,metadata
+
+vif_plugging_is_fatal: false
+vif_plugging_timeout: 0
+
+auth_strategy = keystone
+
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_userid = {{ RABBIT_USER }}
+rabbit_password = {{ RABBIT_PASS }}
+
+my_ip = {{ internal_ip }}
+vnc_enabled = True
+vncserver_listen = {{ internal_ip }}
+vncserver_proxyclient_address = {{ internal_ip }}
+novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
+
+novncproxy_host = {{ internal_ip }}
+novncproxy_port = 6080
+
+network_api_class = nova.network.neutronv2.api.API
+linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
+firewall_driver = nova.virt.firewall.NoopFirewallDriver
+security_group_api = neutron
+
+instance_usage_audit = True
+instance_usage_audit_period = hour
+notify_on_state_change = vm_and_task_state
+notification_driver = nova.openstack.common.notifier.rpc_notifier
+notification_driver = ceilometer.compute.nova_notifier
+
+[database]
+# The SQLAlchemy connection string used to connect to the database
+connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = nova
+admin_password = {{ NOVA_PASS }}
+
+[glance]
+host = {{ HA_VIP }}
+
+[neutron]
+url = http://{{ HA_VIP }}:9696
+auth_strategy = keystone
+admin_tenant_name = service
+admin_username = neutron
+admin_password = {{ NEUTRON_PASS }}
+admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml
new file mode
100644 index 0000000..d6c5cc8 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/handlers/main.yml @@ -0,0 +1,21 @@ +--- +- name: restart neutron-plugin-openvswitch-agent + service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes + when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: restart neutron-l3-agent + service: name=neutron-l3-agent state=restarted enabled=yes + +- name: kill dnsmasq + command: killall dnsmasq + ignore_errors: True + +- name: restart neutron-dhcp-agent + service: name=neutron-dhcp-agent state=restarted enabled=yes + +- name: restart neutron-metadata-agent + service: name=neutron-metadata-agent state=restarted enabled=yes + +- name: restart xorp + service: name=xorp state=restarted enabled=yes sleep=10 + ignore_errors: True diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml new file mode 100644 index 0000000..d6f38a0 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/igmp-router.yml @@ -0,0 +1,20 @@ +--- +- name: Install XORP to provide IGMP router functionality + apt: pkg=xorp + +- name: configure xorp + template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot + notify: + - restart xorp + +- name: set xorp defaults + lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes + notify: + - restart xorp + +- meta: flush_handlers + +- name: start and enable xorp service + service: name=xorp state=started enabled=yes + retries: 2 + delay: 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml new file mode 100644 index 0000000..1d4b591 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/main.yml @@ -0,0 +1,114 @@ +--- +- name: activate ipv4 forwarding + sysctl: name=net.ipv4.ip_forward value=1 + state=present reload=yes + +- name: deactivate ipv4 rp filter + sysctl: name=net.ipv4.conf.all.rp_filter value=0 + state=present reload=yes + +- name: deactivate ipv4 default rp filter + sysctl: name=net.ipv4.conf.default.rp_filter + value=0 state=present reload=yes + +- name: install neutron network related packages + apt: name={{ item }} state=present force=yes + with_items: + - neutron-plugin-ml2 + - openvswitch-datapath-dkms + - openvswitch-switch + - neutron-l3-agent + - neutron-dhcp-agent + +- name: generate neutron service list + shell: echo {{ item }} >> /opt/service + with_items: + - openvswitch-switch + - neutron-l3-agent + - neutron-dhcp-agent + - neutron-plugin-openvswitch-agent + - neutron-metadata-agent + - xorp + +- name: install neutron openvswitch agent + apt: name=neutron-plugin-openvswitch-agent + state=present force=yes + when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" + +- name: config neutron + template: src=neutron-network.conf + dest=/etc/neutron/neutron.conf backup=yes + notify: + - restart neutron-plugin-openvswitch-agent + - restart neutron-l3-agent + - kill dnsmasq + - restart neutron-dhcp-agent + - restart neutron-metadata-agent + +- name: config l3 agent + template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini + backup=yes + notify: + - restart neutron-l3-agent + +- name: config dhcp agent + template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini + backup=yes + notify: + - kill dnsmasq + - restart neutron-dhcp-agent + +- name: update dnsmasq-neutron.conf 
+  template: src=dnsmasq-neutron.conf
+            dest=/etc/neutron/dnsmasq-neutron.conf
+  notify:
+    - kill dnsmasq
+    - restart neutron-dhcp-agent
+
+- name: config metadata agent
+  template: src=metadata_agent.ini
+            dest=/etc/neutron/metadata_agent.ini backup=yes
+  notify:
+    - restart neutron-metadata-agent
+
+- name: config ml2 plugin
+  template: src=ml2_conf.ini
+            dest=/etc/neutron/plugins/ml2/ml2_conf.ini
+            backup=yes
+  notify:
+    - restart neutron-plugin-openvswitch-agent
+
+- meta: flush_handlers
+
+- name: add br-int
+  openvswitch_bridge: bridge=br-int state=present
+
+- name: add br-ex
+  openvswitch_bridge: bridge=br-ex state=present
+  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: assign a port to br-ex for physical ext interface
+  openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }}
+                    state=present
+  when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- include: igmp-router.yml
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert kernel support for vxlan
+  command: modinfo -F version vxlan
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- name: assert iproute2 support for vxlan
+  command: ip link add type vxlan help
+  register: iproute_out
+  failed_when: iproute_out.rc == 255
+  when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
+
+- include: odl.yml
+  when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
+
+- name: restart ovs service
+  service: name=openvswitch-switch state=restarted enabled=yes
+
+- meta: flush_handlers
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml
new file mode 100644
index 0000000..a2b449c
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/tasks/odl.yml
@@ -0,0 +1,13 @@
+---
+- name: ovs set manager
+  command: ovs-vsctl set-manager tcp:{{ controller }}:6640
+
+- name: get ovs uuid
+  shell: ovs-vsctl get Open_vSwitch . _uuid
+  register: ovs_uuid
+
+- name: set bridge_mappings
+  command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
+
+- name: set local ip
+  command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini
new file mode 100644
index 0000000..19eb62e
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dhcp_agent.ini
@@ -0,0 +1,90 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or rpc errors. The interval is number of
+# seconds between attempts.
+resync_interval = 5
+
+# The DHCP agent requires an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
+# BigSwitch/Floodlight)
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Name of Open vSwitch bridge to use
+# ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = +dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. +dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. 
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf
new file mode 100644
index 0000000..7bcbd9d
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/dnsmasq-neutron.conf
@@ -0,0 +1,2 @@
+dhcp-option-force=26,1454
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot
new file mode 100644
index 0000000..32caf96
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/etc/xorp/config.boot
@@ -0,0 +1,25 @@
+interfaces {
+    restore-original-config-on-shutdown: false
+    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+        description: "Internal pNodes interface"
+        disable: false
+        default-system-config
+    }
+}
+
+protocols {
+    igmp {
+        disable: false
+        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
+                disable: false
+                version: 3
+            }
+        }
+        traceoptions {
+            flag all {
+                disable: false
+            }
+        }
+    }
+}
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini
new file mode 100644
index 0000000..b394c00
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/l3_agent.ini
@@ -0,0 +1,81 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+# debug = False
+verbose = True
+
+# L3 requires that an interface driver be set. Choose the one that best
+# matches your plugin.
+# interface_driver =
+
+# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
+# that supports L3 agent
+# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+
+# Use veth for an OVS interface or not.
+# Support kernels with limited namespace support
+# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
+# ovs_use_veth = False
+
+# Example of interface_driver option for LinuxBridge
+# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
+
+# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
+# iproute2 package that supports namespaces).
+use_namespaces = True
+
+# If use_namespaces is set to False then the agent can only configure one router.
+
+# This is done by setting the specific router_id.
+# router_id =
+
+# When external_network_bridge is set, each L3 agent can be associated
+# with no more than one external network. This value should be set to the UUID
+# of that external network. To allow L3 agent support multiple external
+# networks, both the external_network_bridge and gateway_external_network_id
+# must be left empty.
+# gateway_external_network_id =
+
+# Indicates that this L3 agent should also handle routers that do not have
+# an external network gateway configured. This option should be True only
+# for a single agent in a Neutron deployment, and may be False for all agents
+# if all routers must have an external network gateway
+handle_internal_only_routers = True
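+# Illustrative check (editor's assumption, not part of the upstream file):
+# the br-ex bridge named below is created by this role's tasks, and its
+# presence and physical uplink can be verified on the network node with:
+#   ovs-vsctl br-exists br-ex && echo "br-ex present"
+#   ovs-vsctl list-ports br-ex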
+# Name of bridge used for external network traffic. This should be set to
+# empty value for the linux bridge. When this parameter is set, each L3 agent
+# can be associated with no more than one external network.
+external_network_bridge = br-ex
+
+# TCP Port used by Neutron metadata server
+metadata_port = 9697
+
+# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
+# to disable this feature.
+send_arp_for_ha = 3
+
+# seconds between re-sync routers' data if needed
+periodic_interval = 40
+
+# seconds to start to sync routers' data after
+# starting agent
+periodic_fuzzy_delay = 5
+
+# enable_metadata_proxy, which is true by default, can be set to False
+# if the Nova metadata server is not available
+# enable_metadata_proxy = True
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# router_delete_namespaces, which is false by default, can be set to True if
+# namespaces can be deleted cleanly on the host running the L3 agent.
+# Do not enable this until you understand the problem with the Linux iproute
+# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
+# you are sure that your version of iproute does not suffer from the problem.
+# If True, namespaces will be deleted when a router is destroyed.
+# router_delete_namespaces = False
+
+# Timeout for ovs-vsctl commands.
+# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
+# ovs_vsctl_timeout = 10
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini
new file mode 100644
index 0000000..6badf28
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/metadata_agent.ini
@@ -0,0 +1,46 @@
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = True
+
+# The Neutron user information for accessing the Neutron API.
+auth_url = http://{{ HA_VIP }}:5000/v2.0
+auth_region = RegionOne
+# Turn off verification of the certificate for ssl
+# auth_insecure = False
+# Certificate Authority public key (CA cert) file for ssl
+# auth_ca_cert =
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+
+# Network service endpoint type to pull from the keystone catalog
+# endpoint_type = adminURL
+
+# IP address used by Nova metadata server
+nova_metadata_ip = {{ HA_VIP }}
+
+# TCP Port used by Nova metadata server
+nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
+metadata_proxy_shared_secret = {{ METADATA_SECRET }}
+
+# Location of Metadata Proxy UNIX domain socket
+# metadata_proxy_socket = $state_path/metadata_proxy
+
+# Number of separate worker processes for metadata server
+# metadata_workers = 0
+
+# Number of backlog requests to configure the metadata server socket with
+# metadata_backlog = 128
+
+# URL to connect to the cache backend.
+# Example of URL using memory caching backend
+# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
+# default_ttl=0 parameter will cause cache entries to never expire.
+# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
+# No cache is used in case no value is passed.
+# cache_url =
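The shared secret above only takes effect if nova-api is started with the matching value. A minimal sketch of the controller-side counterpart, assuming the openstack-config utility is available and that the same METADATA_SECRET value is rendered there (neither assumption comes from this patch):

    openstack-config --set /etc/nova/nova.conf DEFAULT service_neutron_metadata_proxy true
    openstack-config --set /etc/nova/nova.conf DEFAULT neutron_metadata_proxy_shared_secret <METADATA_SECRET>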
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini
new file mode 100644
index 0000000..a790069
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/ml2_conf.ini
@@ -0,0 +1,108 @@
+[ml2]
+# (ListOpt) List of network type driver entrypoints to be loaded from
+# the neutron.ml2.type_drivers namespace.
+#
+# type_drivers = local,flat,vlan,gre,vxlan
+# Example: type_drivers = flat,vlan,gre,vxlan
+type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
+
+# (ListOpt) Ordered list of network_types to allocate as tenant
+# networks. The default value 'local' is useful for single-box testing
+# but provides no connectivity between hosts.
+#
+# tenant_network_types = local
+# Example: tenant_network_types = vlan,gre,vxlan
+tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
+
+# (ListOpt) Ordered list of networking mechanism driver entrypoints
+# to be loaded from the neutron.ml2.mechanism_drivers namespace.
+# mechanism_drivers =
+# Example: mechanism_drivers = openvswitch,mlnx
+# Example: mechanism_drivers = arista
+# Example: mechanism_drivers = cisco,logger
+# Example: mechanism_drivers = openvswitch,brocade
+# Example: mechanism_drivers = linuxbridge,brocade
+mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
+
+[ml2_type_flat]
+# (ListOpt) List of physical_network names with which flat networks
+# can be created. Use * to allow flat networks with arbitrary
+# physical_network names.
+#
+flat_networks = external
+# Example:flat_networks = physnet1,physnet2
+# Example:flat_networks = *
+
+[ml2_type_vlan]
+# (ListOpt) List of [<physical_network>[:<vlan_min>:<vlan_max>]] tuples
+# specifying physical_network names usable for VLAN provider and
+# tenant networks, as well as ranges of VLAN tags on each
+# physical_network available for allocation as tenant networks.
+#
+network_vlan_ranges =
+# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
+
+[ml2_type_gre]
+# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
+tunnel_id_ranges = 1:1000
+
+[ml2_type_vxlan]
+# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
+# ranges of VXLAN VNI IDs that are available for tenant network allocation.
+#
+vni_ranges = 1001:4095
+
+# (StrOpt) Multicast group for the VXLAN interface. When configured, will
+# enable sending all broadcast traffic to this multicast group. When left
+# unconfigured, will disable multicast VXLAN mode.
+#
+vxlan_group = 239.1.1.1
+# Example: vxlan_group = 239.1.1.1
+
+[securitygroup]
+# Controls if neutron security group is enabled or not.
+# It should be false when you use nova security group.
+# enable_security_group = True
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
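+# Illustrative note (editor's assumption, not part of the upstream file):
+# with the OVS hybrid driver above, each instance port is plumbed through a
+# qbr-* linux bridge so that iptables rules can apply; on a compute node
+# this wiring can be observed with:
+#   brctl show | grep qbr
+#   iptables -S | grep neutron-openvswi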
+
+[database]
+connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8
+
+[ovs]
+local_ip = {{ internal_ip }}
+{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
+integration_bridge = br-int
+tunnel_bridge = br-tun
+tunnel_id_ranges = 1001:4095
+tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
+{% endif %}
+
+[agent]
+root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
+tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
+{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
+vxlan_udp_port = 4789
+{% endif %}
+l2_population = False
+
+[odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+network_vlan_ranges = 1001:4095
+tunnel_id_ranges = 1001:4095
+tun_peer_patch_port = patch-int
+int_peer_patch_port = patch-tun
+tenant_network_type = vxlan
+tunnel_bridge = br-tun
+integration_bridge = br-int
+controllers = 10.1.0.15:8080:admin:admin
+{% endif %}
+
+[ml2_odl]
+{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
+username = {{ odl_username }}
+password = {{ odl_password }}
+url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
+{% endif %}
+
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf
new file mode 100644
index 0000000..93be9cb
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron-network.conf
@@ -0,0 +1,465 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not user_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# Maximum amount of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds)
+dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+allow_overlapping_ips = True
+# Ensure that configured gateway is on subnet
+# force_gateway_on_subnet = False
+
+
+# RPC configuration options. Defined in rpc __init__
+# The messaging module to use, defaults to kombu.
+# rpc_backend = neutron.openstack.common.rpc.impl_kombu
+rpc_backend = rabbit
+rabbit_host = {{ rabbit_host }}
+rabbit_password = {{ RABBIT_PASS }}
+
+# Size of RPC thread pool
+rpc_thread_pool_size = 240
+# Size of RPC connection pool
+rpc_conn_pool_size = 100
+# Seconds to wait for a response from call or multicall
+rpc_response_timeout = 300
+# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
+rpc_cast_timeout = 300
+# Modules of exceptions that are permitted to be recreated
+# upon receiving exception data from an rpc call.
+# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
+# AMQP exchange to connect to if using RabbitMQ or QPID
+# control_exchange = neutron
+
+# If passed, use a fake RabbitMQ provider
+# fake_rabbit = False
+
+# Configuration options if sending notifications via kombu rpc (these are
+# the defaults)
+# SSL version to use (valid only if SSL enabled)
+# kombu_ssl_version =
+# SSL key file (valid only if SSL enabled)
+# kombu_ssl_keyfile =
+# SSL cert file (valid only if SSL enabled)
+# kombu_ssl_certfile =
+# SSL certification authority file (valid only if SSL enabled)
+# kombu_ssl_ca_certs =
+# Port where RabbitMQ server is running/listening
+rabbit_port = 5672
+# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
+# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
+# rabbit_hosts = localhost:5672
+# User ID used for RabbitMQ connections
+rabbit_userid = {{ RABBIT_USER }}
+# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. 
+# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. 
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the comand directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=::[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of and must be unique; must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. 
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
+# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf
new file mode 100644
index 0000000..1575367
--- /dev/null
+++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron.conf
@@ -0,0 +1,466 @@
+[DEFAULT]
+# Print more verbose output (set logging level to INFO instead of default WARNING level).
+verbose = {{ VERBOSE }}
+
+# Print debugging output (set logging level to DEBUG instead of default WARNING level).
+debug = {{ DEBUG }}
+
+# Where to store Neutron state files. This directory must be writable by the
+# user executing the agent.
+state_path = /var/lib/neutron
+
+# Where to store lock files
+lock_path = $state_path/lock
+
+# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# log_date_format = %Y-%m-%d %H:%M:%S
+
+# use_syslog -> syslog
+# log_file and log_dir -> log_dir/log_file
+# (not log_file) and log_dir -> log_dir/{binary_name}.log
+# use_stderr -> stderr
+# (not user_stderr) and (not log_file) -> stdout
+# publish_errors -> notification system
+
+# use_syslog = False
+# syslog_log_facility = LOG_USER
+
+# use_stderr = True
+# log_file =
+log_dir = /var/log/neutron
+
+# publish_errors = False
+
+# Address to bind the API server to
+bind_host = {{ network_server_host }}
+
+# Port to bind the API server to
+bind_port = 9696
+
+# Path to the extensions. Note that this can be a colon-separated list of
+# paths. For example:
+# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
+# The __path__ of neutron.extensions is appended to this, so if your
+# extensions are in there you don't need to specify them here
+# api_extensions_path =
+
+# (StrOpt) Neutron core plugin entrypoint to be loaded from the
+# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
+# plugins included in the neutron source distribution. For compatibility with
+# previous versions, the class name of a plugin can be specified instead of its
+# entrypoint name.
+#
+#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+core_plugin = ml2
+# Example: core_plugin = ml2
+
+# (ListOpt) List of service plugin entrypoints to be loaded from the
+# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
+# the plugins included in the neutron source distribution. For compatibility
+# with previous versions, the class name of a plugin can be specified instead
+# of its entrypoint name.
+#
+# service_plugins =
+# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
+service_plugins = router
+
+# Paste configuration file
+api_paste_config = api-paste.ini
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted
+# networks to first DHCP agent which sends get_active_networks message to
+# neutron server
+# network_auto_schedule = True
+
+# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
+# routers to first L3 agent which sends sync_routers message to neutron server
+# router_auto_schedule = True
+
+# Number of DHCP agents scheduled to host a network. This enables redundant
+# DHCP agents for configured networks.
+# dhcp_agents_per_network = 1
+
+# =========== end of items for agent scheduler extension =====
+
+# =========== WSGI parameters related to the API server ==============
+# Number of separate worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as workers. The parent process manages them.
+api_workers = 8
+
+# Number of separate RPC worker processes to spawn. The default, 0, runs the
+# worker thread in the current process. Greater than 0 launches that number of
+# child processes as RPC workers. The parent process manages them.
+# This feature is experimental until issues are addressed and testing has been
+# enabled for various plugins for compatibility.
+rpc_workers = 8
+
+# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
+# starting API server. Not supported on OS X.
+# tcp_keepidle = 600
+
+# Number of seconds to keep retrying to listen
+# retry_until_window = 30
+
+# Number of backlog requests to configure the socket with.
+# backlog = 4096
+
+# Max header line to accommodate large tokens
+# max_header_line = 16384
+
+# Enable SSL on the API server
+# use_ssl = False
+
+# Certificate file to use when starting API server securely
+# ssl_cert_file = /path/to/certfile
+
+# Private key file to use when starting API server securely
+# ssl_key_file = /path/to/keyfile
+
+# CA certificate file to use when starting API server securely to
+# verify connecting clients. This is an optional parameter only required if
+# API clients need to authenticate to the API server using SSL certificates
+# signed by a trusted CA
+# ssl_ca_file = /path/to/cafile
+# ======== end of WSGI parameters related to the API server ==========
+
+
+# ======== neutron nova interactions ==========
+# Send notification to nova when port status is active.
+notify_nova_on_port_status_changes = True
+
+# Send notifications to nova when port data (fixed_ips/floatingips) change
+# so nova can update its cache.
+notify_nova_on_port_data_changes = True
+
+# URL for connection to nova (Only supports one nova region currently).
+nova_url = http://{{ HA_VIP }}:8774/v2
+
+# Name of nova region to use. Useful if keystone manages more than one region
+nova_region_name = RegionOne
+
+# Username for connection to nova in admin context
+nova_admin_username = nova
+
+# The uuid of the admin nova tenant
+nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
+
+# Password for connection to nova in admin context.
+nova_admin_password = {{ NOVA_PASS }}
+
+# Authorization URL for connection to nova in admin context.
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
+
+# Number of seconds between sending events to nova if there are any events to send
+send_events_interval = 2
+
+# ======== end of neutron nova interactions ==========
+
+[quotas]
+# Default driver to use for quota checks
+quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+# Resource name(s) that are supported in quota features
+quota_items = network,subnet,port
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited.
+default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+quota_port = 8000
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited.
+quota_security_group = 1000
+
+# Number of security group rules allowed per tenant. A negative value means
+# unlimited.
+quota_security_group_rule = 1000
+
+# Number of vips allowed per tenant. A negative value means unlimited.
+# quota_vip = 10
+
+# Number of pools allowed per tenant. A negative value means unlimited.
+# quota_pool = 10
+
+# Number of pool members allowed per tenant. A negative value means unlimited.
+# The default is unlimited because a member is not a real resource consumer
+# on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_member = -1
+
+# Number of health monitors allowed per tenant. A negative value means
+# unlimited.
+# The default is unlimited because a health monitor is not a real resource
+# consumer on Openstack. However, on back-end, a member is a resource consumer
+# and that is the reason why quota is possible.
+# quota_health_monitors = -1
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# quota_floatingip = 50
+
+[agent]
+# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
+# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+# =========== items for agent management extension =============
+# seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time
+report_interval = 30
+
+# =========== end of items for agent management extension =====
+
+[keystone_authtoken]
+auth_uri = http://{{ HA_VIP }}:5000/v2.0
+identity_uri = http://{{ HA_VIP }}:35357
+admin_tenant_name = service
+admin_user = neutron
+admin_password = {{ NEUTRON_PASS }}
+signing_dir = $state_path/keystone-signing
+
+[database]
+# This line MUST be changed to actually run the plugin.
+# Example:
+# connection = mysql://root:pass@127.0.0.1:3306/neutron
+# Replace 127.0.0.1 above with the IP address of the database used by the
+# main neutron server. (Leave it as is if the database runs on this host.)
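+# Illustrative note (editor's assumption, not part of the upstream file):
+# on the network node the connection below is left commented out because
+# the agents reach neutron-server over RPC rather than the database; their
+# registration can be confirmed from the controller with:
+#   neutron agent-list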
+# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=::[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of and must be unique; must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. 
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh new file mode 100644 index 0000000..b92e202 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/neutron_init.sh @@ -0,0 +1,4 @@ +# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True + +# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }} + diff --git a/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf new file mode 100644 index 0000000..9587073 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/neutron-network/templates/nova.conf @@ -0,0 +1,69 @@ +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose={{ VERBOSE }} +debug={{ DEBUG }} +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +enabled_apis=ec2,osapi_compute,metadata + +vif_plugging_is_fatal = false +vif_plugging_timeout = 0 + +auth_strategy = keystone + +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + +my_ip = {{ internal_ip }} +vnc_enabled = True +vncserver_listen = {{ internal_ip }} +vncserver_proxyclient_address = {{ internal_ip }} +novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html + +novncproxy_host = {{ internal_ip }} +novncproxy_port = 6080 + +network_api_class = nova.network.neutronv2.api.API +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = neutron + +instance_usage_audit = True +instance_usage_audit_period = hour +notify_on_state_change = vm_and_task_state +notification_driver = nova.openstack.common.notifier.rpc_notifier +notification_driver = ceilometer.compute.nova_notifier + +[database] +# The SQLAlchemy connection string used to connect to the database +connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = nova +admin_password = {{ NOVA_PASS }} + +[glance] +host = {{ HA_VIP }} + +[neutron] +url = http://{{ HA_VIP }}:9696 +auth_strategy = keystone +admin_tenant_name = service +admin_username = neutron +admin_password = {{ NEUTRON_PASS }} +admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml new file mode 100644 index
0000000..c135003 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-compute/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart nova-compute + service: name=nova-compute state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml new file mode 100644 index 0000000..51c8dfa --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-compute/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: install nova-compute related packages + apt: name=nova-compute-kvm state=present force=yes + +- name: update nova-compute conf + template: src={{ item }} dest=/etc/nova/{{ item }} + with_items: + - nova.conf + - nova-compute.conf + notify: + - restart nova-compute + +- name: generate nova compute service list + shell: echo {{ item }} >> /opt/service + with_items: + - nova-compute + +- meta: flush_handlers + +- name: remove nova sqlite db + shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf new file mode 100644 index 0000000..401dee7 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova-compute.conf @@ -0,0 +1,7 @@ +[DEFAULT] +compute_driver=libvirt.LibvirtDriver +force_raw_images = true +[libvirt] +virt_type=qemu +images_type = raw +mem_stats_period_seconds=0 diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf new file mode 100644 index 0000000..4988cb0 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-compute/templates/nova.conf @@ -0,0 +1,73 @@ +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose={{ VERBOSE }} +debug={{ DEBUG }} +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +enabled_apis=ec2,osapi_compute,metadata + +vif_plugging_is_fatal = false +vif_plugging_timeout = 0 + +auth_strategy = keystone + +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + +my_ip = {{ internal_ip }} +vnc_enabled = True +vncserver_listen = {{ internal_ip }} +vncserver_proxyclient_address = {{ internal_ip }} +novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html + +novncproxy_host = {{ internal_ip }} +novncproxy_port = 6080 + +network_api_class = nova.network.neutronv2.api.API +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = neutron + +instance_usage_audit = True +instance_usage_audit_period = hour +notify_on_state_change = vm_and_task_state +notification_driver = nova.openstack.common.notifier.rpc_notifier +notification_driver = ceilometer.compute.nova_notifier + +[database] +# The SQLAlchemy connection string used to connect to the database +connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova + +[conductor] +manager = nova.conductor.manager.ConductorManager +topic =
conductor + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = nova +admin_password = {{ NOVA_PASS }} + +[glance] +host = {{ HA_VIP }} + +[neutron] +url = http://{{ HA_VIP }}:9696 +auth_strategy = keystone +admin_tenant_name = service +admin_username = neutron +admin_password = {{ NEUTRON_PASS }} +admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml new file mode 100644 index 0000000..b4c1585 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/handlers/main.yml @@ -0,0 +1,24 @@ +--- +- name: restart nova-api + service: name=nova-api state=restarted enabled=yes + +- name: restart nova-cert + service: name=nova-cert state=restarted enabled=yes + +- name: restart nova-consoleauth + service: name=nova-consoleauth state=restarted enabled=yes + +- name: restart nova-scheduler + service: name=nova-scheduler state=restarted enabled=yes + +- name: restart nova-conductor + service: name=nova-conductor state=restarted enabled=yes + +- name: restart nova-novncproxy + service: name=nova-novncproxy state=restarted enabled=yes + +- name: remove nova-sqlite-db + shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed + +- name: restart neutron-server + service: name=neutron-server state=restarted enabled=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml new file mode 100644 index 0000000..72a9f4d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- include: nova_install.yml + tags: + - install + - nova_install + - nova + +- include: nova_config.yml + when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' + tags: + - config + - nova_config + - nova diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml new file mode 100644 index 0000000..62351fa --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_config.yml @@ -0,0 +1,16 @@ +--- +- name: nova db sync + command: su -s /bin/sh -c "nova-manage db sync" nova + register: result + until: result.rc == 0 + retries: 5 + delay: 3 + notify: + - restart nova-api + - restart nova-cert + - restart nova-consoleauth + - restart nova-scheduler + - restart nova-conductor + - restart nova-novncproxy + +- meta: flush_handlers diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml new file mode 100644 index 0000000..a1cded5 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/tasks/nova_install.yml @@ -0,0 +1,35 @@ +--- +- name: install nova related packages + apt: name={{ item }} state=present force=yes + with_items: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + - nova-scheduler + - python-novaclient + - python-oslo.rootwrap + +- name: generate nova control service list + shell: echo {{ item }} >> /opt/service + with_items: + - nova-api + - nova-cert + - nova-conductor + - nova-consoleauth + - nova-novncproxy + -
nova-scheduler + +- name: update nova conf + template: src=nova.conf + dest=/etc/nova/nova.conf + backup=yes + notify: + - restart nova-api + - restart nova-cert + - restart nova-consoleauth + - restart nova-scheduler + - restart nova-conductor + - restart nova-novncproxy + - remove nova-sqlite-db diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini new file mode 100644 index 0000000..19eb62e --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dhcp_agent.ini @@ -0,0 +1,90 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# The DHCP agent will resync its state with Neutron to recover from any +# transient notification or rpc errors. The interval is number of +# seconds between attempts. +resync_interval = 5 + +# The DHCP agent requires an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, +# BigSwitch/Floodlight) +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Name of Open vSwitch bridge to use +# ovs_integration_bridge = br-int + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires +# no additional setup of the DHCP server. +dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). +use_namespaces = True + +# The DHCP server can assist with providing metadata support on isolated +# networks. Setting this value to True will cause the DHCP server to append +# specific host routes to the DHCP request. The metadata service will only +# be activated when the subnet does not contain any router port. The guest +# instance must be configured to request host routes via DHCP (Option 121). +enable_isolated_metadata = False + +# Allows for serving metadata requests coming from a dedicated metadata +# access network whose cidr is 169.254.169.254/16 (or larger prefix), and +# is connected to a Neutron router from which the VMs send metadata +# request. In this case DHCP Option 121 will not be injected in VMs, as +# they will be able to reach 169.254.169.254 through a router. +# This option requires enable_isolated_metadata = True +enable_metadata_network = False + +# Number of threads to use during sync process. Should not exceed connection +# pool size configured on server. +# num_sync_threads = 4 + +# Location to store DHCP server config files +# dhcp_confs = $state_path/dhcp + +# Domain to use for building the hostnames +dhcp_domain = openstacklocal + +# Override the default dnsmasq settings with this file +# dnsmasq_config_file = +dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf + +# Comma-separated list of DNS servers which will be used by dnsmasq +# as forwarders. +# dnsmasq_dns_servers = + +# Limit number of leases to prevent a denial-of-service. 
+dnsmasq_lease_max = 16777216 + +# Location to DHCP lease relay UNIX domain socket +# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# dhcp_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the dhcp agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a dhcp server is disabled. +# dhcp_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf new file mode 100644 index 0000000..7bcbd9d --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/dnsmasq-neutron.conf @@ -0,0 +1,2 @@ +dhcp-option-force=26,1454 + diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot new file mode 100644 index 0000000..32caf96 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/etc/xorp/config.boot @@ -0,0 +1,25 @@ +interfaces { + restore-original-config-on-shutdown: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + description: "Internal pNodes interface" + disable: false + default-system-config + } +} + +protocols { + igmp { + disable: false + interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { + disable: false + version: 3 + } + } + traceoptions { + flag all { + disable: false + } + } + } +} diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini new file mode 100644 index 0000000..b394c00 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/l3_agent.ini @@ -0,0 +1,81 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +# debug = False +verbose = True + +# L3 requires that an interface driver be set. Choose the one that best +# matches your plugin. +# interface_driver = + +# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) +# that supports L3 agent +# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver +interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver + +# Use veth for an OVS interface or not. +# Support kernels with limited namespace support +# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. +# ovs_use_veth = False + +# Example of interface_driver option for LinuxBridge +# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver + +# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and +# iproute2 package that supports namespaces). 
+use_namespaces = True + +# If use_namespaces is set as False then the agent can only configure one router. + +# This is done by setting the specific router_id. +# router_id = + +# When external_network_bridge is set, each L3 agent can be associated +# with no more than one external network. This value should be set to the UUID +# of that external network. To allow L3 agent support multiple external +# networks, both the external_network_bridge and gateway_external_network_id +# must be left empty. +# gateway_external_network_id = + +# Indicates that this L3 agent should also handle routers that do not have +# an external network gateway configured. This option should be True only +# for a single agent in a Neutron deployment, and may be False for all agents +# if all routers must have an external network gateway +handle_internal_only_routers = True + +# Name of bridge used for external network traffic. This should be set to +# empty value for the linux bridge. when this parameter is set, each L3 agent +# can be associated with no more than one external network. +external_network_bridge = br-ex + +# TCP Port used by Neutron metadata server +metadata_port = 9697 + +# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 +# to disable this feature. +send_arp_for_ha = 3 + +# seconds between re-sync routers' data if needed +periodic_interval = 40 + +# seconds to start to sync routers' data after +# starting agent +periodic_fuzzy_delay = 5 + +# enable_metadata_proxy, which is true by default, can be set to False +# if the Nova metadata server is not available +# enable_metadata_proxy = True + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# router_delete_namespaces, which is false by default, can be set to True if +# namespaces can be deleted cleanly on the host running the L3 agent. +# Do not enable this until you understand the problem with the Linux iproute +# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and +# you are sure that your version of iproute does not suffer from the problem. +# If True, namespaces will be deleted when a router is destroyed. +# router_delete_namespaces = False + +# Timeout for ovs-vsctl commands. +# If the timeout expires, ovs commands will fail with ALARMCLOCK error. +# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini new file mode 100644 index 0000000..6badf28 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/metadata_agent.ini @@ -0,0 +1,46 @@ +[DEFAULT] +# Show debugging output in log (sets DEBUG log level output) +debug = True + +# The Neutron user information for accessing the Neutron API. +auth_url = http://{{ HA_VIP }}:5000/v2.0 +auth_region = RegionOne +# Turn off verification of the certificate for ssl +# auth_insecure = False +# Certificate Authority public key (CA cert) file for ssl +# auth_ca_cert = +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} + +# Network service endpoint type to pull from the keystone catalog +# endpoint_type = adminURL + +# IP address used by Nova metadata server +nova_metadata_ip = {{ HA_VIP }} + +# TCP Port used by Nova metadata server +nova_metadata_port = 8775 + +# When proxying metadata requests, Neutron signs the Instance-ID header with a +# shared secret to prevent spoofing. 
You may select any string for a secret, +# but it must match here and in the configuration used by the Nova Metadata +# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret +metadata_proxy_shared_secret = {{ METADATA_SECRET }} + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy + +# Number of separate worker processes for metadata server +# metadata_workers = 0 + +# Number of backlog requests to configure the metadata server socket with +# metadata_backlog = 128 + +# URL to connect to the cache backend. +# Example of URL using memory caching backend +# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5 +# default_ttl=0 parameter will cause cache entries to never expire. +# Otherwise default_ttl specifies time in seconds a cache entry is valid for. +# No cache is used in case no value is passed. +# cache_url = diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini new file mode 100644 index 0000000..a790069 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/ml2_conf.ini @@ -0,0 +1,108 @@ +[ml2] +# (ListOpt) List of network type driver entrypoints to be loaded from +# the neutron.ml2.type_drivers namespace. +# +# type_drivers = local,flat,vlan,gre,vxlan +# Example: type_drivers = flat,vlan,gre,vxlan +type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} + +# (ListOpt) Ordered list of network_types to allocate as tenant +# networks. The default value 'local' is useful for single-box testing +# but provides no connectivity between hosts. +# +# tenant_network_types = local +# Example: tenant_network_types = vlan,gre,vxlan +tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} + +# (ListOpt) Ordered list of networking mechanism driver entrypoints +# to be loaded from the neutron.ml2.mechanism_drivers namespace. +# mechanism_drivers = +# Example: mechanism_drivers = openvswitch,mlnx +# Example: mechanism_drivers = arista +# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,brocade +# Example: mechanism_drivers = linuxbridge,brocade +mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} + +[ml2_type_flat] +# (ListOpt) List of physical_network names with which flat networks +# can be created. Use * to allow flat networks with arbitrary +# physical_network names. +# +flat_networks = external +# Example:flat_networks = physnet1,physnet2 +# Example:flat_networks = * + +[ml2_type_vlan] +# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples +# specifying physical_network names usable for VLAN provider and +# tenant networks, as well as ranges of VLAN tags on each +# physical_network available for allocation as tenant networks. +# +network_vlan_ranges = +# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 + +[ml2_type_gre] +# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation +tunnel_id_ranges = 1:1000 + +[ml2_type_vxlan] +# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network allocation. +# +vni_ranges = 1001:4095 + +# (StrOpt) Multicast group for the VXLAN interface. When configured, will +# enable sending all broadcast traffic to this multicast group. When left +# unconfigured, will disable multicast VXLAN mode.
+# +vxlan_group = 239.1.1.1 +# Example: vxlan_group = 239.1.1.1 + +[securitygroup] +# Controls if neutron security group is enabled or not. +# It should be false when you use nova security group. +# enable_security_group = True +firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +enable_security_group = True + +[database] +connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 + +[ovs] +local_ip = {{ internal_ip }} +{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} +integration_bridge = br-int +tunnel_bridge = br-tun +tunnel_id_ranges = 1001:4095 +tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} +{% endif %} + +[agent] +root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf +tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} +{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} +vxlan_udp_port = 4789 +{% endif %} +l2_population = False + +[odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +network_vlan_ranges = 1001:4095 +tunnel_id_ranges = 1001:4095 +tun_peer_patch_port = patch-int +int_peer_patch_port = patch-tun +tenant_network_type = vxlan +tunnel_bridge = br-tun +integration_bridge = br-int +controllers = 10.1.0.15:8080:admin:admin +{% endif %} + +[ml2_odl] +{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} +username = {{ odl_username }} +password = {{ odl_password }} +url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron +{% endif %} + diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf new file mode 100644 index 0000000..93be9cb --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron-network.conf @@ -0,0 +1,465 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ DEBUG }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not use_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port to bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution.
For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated. +# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation.
+# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. (boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. 
+# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. 
+notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update its cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. +nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on OpenStack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on OpenStack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility.
+# Change to "sudo" to skip the filtering and just run the command directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=<service_type>:<name>:<driver>[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of <service type> and <name> must be unique; <driver> must also be unique +# This is multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
+# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider. +# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf new file mode 100644 index 0000000..1575367 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron.conf @@ -0,0 +1,466 @@ +[DEFAULT] +# Print more verbose output (set logging level to INFO instead of default WARNING level). +verbose = {{ VERBOSE }} + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +debug = {{ DEBUG }} + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. +state_path = /var/lib/neutron + +# Where to store lock files +lock_path = $state_path/lock + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not use_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +log_dir = /var/log/neutron + +# publish_errors = False + +# Address to bind the API server to +bind_host = {{ network_server_host }} + +# Port to bind the API server to +bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +core_plugin = ml2 +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering +service_plugins = router + +# Paste configuration file +api_paste_config = api-paste.ini + +# The strategy to be used for auth. +# Supported values are 'keystone'(default), 'noauth'. +auth_strategy = keystone + +# Base MAC address. The first 3 octets will remain unchanged. If the +# 4th octet is not 00, it will also be used. The others will be +# randomly generated.
+# 3 octet +# base_mac = fa:16:3e:00:00:00 +# 4 octet +# base_mac = fa:16:3e:4f:00:00 + +# Maximum amount of retries to generate a unique MAC address +# mac_generation_retries = 16 + +# DHCP Lease duration (in seconds) +dhcp_lease_duration = 86400 + +# Allow sending resource operation notification to DHCP agent +# dhcp_agent_notification = True + +# Enable or disable bulk create/update/delete operations +# allow_bulk = True +# Enable or disable pagination +# allow_pagination = False +# Enable or disable sorting +# allow_sorting = False +# Enable or disable overlapping IPs for subnets +# Attention: the following parameter MUST be set to False if Neutron is +# being used in conjunction with nova security groups +allow_overlapping_ips = True +# Ensure that configured gateway is on subnet +# force_gateway_on_subnet = False + + +# RPC configuration options. Defined in rpc __init__ +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_password = {{ RABBIT_PASS }} + +# Size of RPC thread pool +rpc_thread_pool_size = 240 +# Size of RPC connection pool +rpc_conn_pool_size = 100 +# Seconds to wait for a response from call or multicall +rpc_response_timeout = 300 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +rpc_cast_timeout = 300 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +# control_exchange = neutron + +# If passed, use a fake RabbitMQ provider +# fake_rabbit = False + +# Configuration options if sending notifications via kombu rpc (these are +# the defaults) +# SSL version to use (valid only if SSL enabled) +# kombu_ssl_version = +# SSL key file (valid only if SSL enabled) +# kombu_ssl_keyfile = +# SSL cert file (valid only if SSL enabled) +# kombu_ssl_certfile = +# SSL certification authority file (valid only if SSL enabled) +# kombu_ssl_ca_certs = +# Port where RabbitMQ server is running/listening +rabbit_port = 5672 +# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' +# rabbit_hosts = localhost:5672 +# User ID used for RabbitMQ connections +rabbit_userid = {{ RABBIT_USER }} +# Location of a virtual RabbitMQ installation. +# rabbit_virtual_host = / +# Maximum retries with trying to connect to RabbitMQ +# (the default of 0 implies an infinite retry count) +# rabbit_max_retries = 0 +# RabbitMQ connection retry interval +# rabbit_retry_interval = 1 +# Use HA queues in RabbitMQ (x-ha-policy: all). You need to +# wipe RabbitMQ database when changing this option. 
(boolean value) +# rabbit_ha_queues = false +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) +# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' +# qpid_hosts = localhost:5672 +# Username for qpid connection +# qpid_username = '' +# Password for qpid connection +# qpid_password = '' +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +# ZMQ +# rpc_backend=neutron.openstack.common.rpc.impl_zmq +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. +# rpc_zmq_bind_address = * + +# ============ Notification System Options ===================== + +# Notifications can be sent when network/subnet/port are created, updated or deleted. +# There are three methods of sending notifications: logging (via the +# log_file directive), rpc (via a message queue) and +# noop (no notifications sent, the default) + +# Notification_driver can be defined multiple times +# Do nothing driver +# notification_driver = neutron.openstack.common.notifier.no_op_notifier +# Logging driver +# notification_driver = neutron.openstack.common.notifier.log_notifier +# RPC driver. +notification_driver = neutron.openstack.common.notifier.rpc_notifier + +# default_notification_level is used to form actual topic name(s) or to set logging level +default_notification_level = INFO + +# default_publisher_id is a part of the notification payload +# host = myhost.com +# default_publisher_id = $host + +# Defined in rpc_notifier, can be comma separated values. +# The actual topic names will be %s.%(default_notification_level)s +notification_topics = notifications + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# =========== end of items for agent scheduler extension ===== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +api_workers = 8 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +rpc_workers = 8 + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. +notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +nova_url = http://{{ HA_VIP }}:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +nova_region_name = RegionOne + +# Username for connection to nova in admin context +nova_admin_username = nova + +# The uuid of the admin nova tenant +nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} + +# Password for connection to nova in admin context. +nova_admin_password = {{ NOVA_PASS }} + +# Authorization URL for connection to nova in admin context. 
+nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 + +# Number of seconds between sending events to nova if there are any events to send +send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +[quotas] +# Default driver to use for quota checks +quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +quota_items = network,subnet,port + +# Default number of resources allowed per tenant. A negative value means +# unlimited. +default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +quota_network = 100 + +# Number of subnets allowed per tenant. A negative value means unlimited. +quota_subnet = 100 + +# Number of ports allowed per tenant. A negative value means unlimited. +quota_port = 8000 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +quota_security_group = 1000 + +# Number of security group rules allowed per tenant. A negative value means +# unlimited. +quota_security_group_rule = 1000 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on OpenStack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on OpenStack. However, on back-end, a health monitor is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitors = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +[agent] +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = neutron +admin_password = {{ NEUTRON_PASS }} +signing_dir = $state_path/keystone-signing + +[database] +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.)
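The {{ HA_VIP }}, {{ NOVA_PASS }} and similar tokens in this template are Jinja2 placeholders that Ansible's template module substitutes when the role renders the file onto the controller. A rough standalone sketch of that substitution, with invented variable values for illustration:

# Sketch of the substitution Ansible's template module performs on the
# config template above; the values below are invented examples only.
from jinja2 import Template

snippet = (
    "nova_url = http://{{ HA_VIP }}:8774/v2\n"
    "nova_admin_password = {{ NOVA_PASS }}\n"
)
print(Template(snippet).render(HA_VIP="10.1.0.222", NOVA_PASS="example-pass"))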
+# connection = sqlite:////var/lib/neutron/neutron.sqlite +#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron + +# The SQLAlchemy connection string used to connect to the slave database +slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +max_pool_size = 100 + +# Timeout in seconds before idle sql connections are reaped +idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +max_overflow = 100 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +connection_debug = 0 + +# Add python stack traces to SQL as comment strings +connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +pool_timeout = 10 + +[service_providers] +# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. +# Must be in form: +# service_provider=<service_type>:<name>:<driver>[:default] +# List of allowed service types includes LOADBALANCER, FIREWALL, VPN +# Combination of <service type> and <name> must be unique; <driver> must also be unique +# This is a multiline option, example for default provider: +# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default +# example of non-default provider: +# service_provider=FIREWALL:name2:firewall_driver_path +# --- Reference implementations --- +service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +# In order to activate Radware's lbaas driver you need to uncomment the next line. +# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. +# Otherwise comment the HA Proxy line +# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default +# Uncomment the following line to make the 'netscaler' LBaaS provider available. +# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver +# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. +# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default +# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
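For orientation, the pool options in the [database] section above line up with SQLAlchemy engine keywords (idle_timeout roughly corresponds to pool_recycle). A minimal sketch, assuming SQLAlchemy and a MySQL driver are installed and using an invented connection URL:

# Sketch only: maps the neutron [database] pool options above onto
# SQLAlchemy's create_engine keywords.
from sqlalchemy import create_engine

engine = create_engine(
    "mysql://neutron:example@127.0.0.1:3306/neutron",  # invented URL
    pool_size=100,      # max_pool_size
    max_overflow=100,   # max_overflow
    pool_timeout=10,    # pool_timeout
    pool_recycle=3600,  # idle_timeout: recycle idle connections
)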
+# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh new file mode 100644 index 0000000..b92e202 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/neutron_init.sh @@ -0,0 +1,4 @@ +# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True + +# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }} + diff --git a/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf new file mode 100644 index 0000000..c8991a3 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/nova-controller/templates/nova.conf @@ -0,0 +1,72 @@ +[DEFAULT] +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +lock_path=/var/lock/nova +force_dhcp_release=True +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +verbose={{ VERBOSE }} +debug={{ DEBUG }} +ec2_private_dns_show_ip=True +api_paste_config=/etc/nova/api-paste.ini +volumes_path=/var/lib/nova/volumes +enabled_apis=osapi_compute,metadata + +vif_plugging_is_fatal = false +vif_plugging_timeout = 0 + +auth_strategy = keystone + +rpc_backend = rabbit +rabbit_host = {{ rabbit_host }} +rabbit_userid = {{ RABBIT_USER }} +rabbit_password = {{ RABBIT_PASS }} + +osapi_compute_listen={{ internal_ip }} +metadata_listen={{ internal_ip }} + +my_ip = {{ internal_ip }} +vnc_enabled = True +vncserver_listen = {{ internal_ip }} +vncserver_proxyclient_address = {{ internal_ip }} +novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html + +novncproxy_host = {{ internal_ip }} +novncproxy_port = 6080 + +network_api_class = nova.network.neutronv2.api.API +linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver +firewall_driver = nova.virt.firewall.NoopFirewallDriver +security_group_api = neutron + +instance_usage_audit = True +instance_usage_audit_period = hour +notify_on_state_change = vm_and_task_state +notification_driver = nova.openstack.common.notifier.rpc_notifier +notification_driver = ceilometer.compute.nova_notifier + +[database] +# The SQLAlchemy connection string used to connect to the database +connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova + +[keystone_authtoken] +auth_uri = http://{{ HA_VIP }}:5000/v2.0 +identity_uri = http://{{ HA_VIP }}:35357 +admin_tenant_name = service +admin_user = nova +admin_password = {{ NOVA_PASS }} + +[glance] +host = {{ HA_VIP }} + +[neutron] +url = http://{{ HA_VIP }}:9696 +auth_strategy = keystone +admin_tenant_name = service +admin_username = neutron +admin_password = {{ NEUTRON_PASS }} +admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml
b/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml new file mode 100644 index 0000000..21f4ef0 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/repo/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: copy local sources.list + template: src=sources.list dest=/etc/apt/sources.list backup=yes + when: LOCAL_REPO is defined + +- name: copy deb packages + shell: cp -rf /opt/repo/pool/main/ /var/cache/apt/archive/ + ignore_errors: True + +- name: add juno cloudarchive + apt_repository: repo="{{ juno_cloud_archive }}" state=present + +- name: first update pkgs + apt: update_cache=yes diff --git a/compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list b/compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list new file mode 100644 index 0000000..8b062e7 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/roles/repo/templates/sources.list @@ -0,0 +1 @@ +{{ LOCAL_REPO }} diff --git a/compass/deploy/ansible/openstack_juno/single-controller.yml b/compass/deploy/ansible/openstack_juno/single-controller.yml new file mode 100644 index 0000000..15220ca --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/single-controller.yml @@ -0,0 +1,38 @@ +--- +- hosts: all + remote_user: root + sudo: true + roles: + - repo + +- hosts: controller + sudo: True + roles: + - common + - database + - mq + - keystone + - nova-controller + - neutron-controller + - dashboard + - cinder-controller + - glance + +- hosts: network + sudo: True + roles: + - common + - neutron-network + +- hosts: storage + sudo: True + roles: + - common + - cinder-volume + +- hosts: compute + sudo: True + roles: + - common + - nova-compute + - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/storage.yml b/compass/deploy/ansible/openstack_juno/storage.yml new file mode 100644 index 0000000..3c0aa41 --- /dev/null +++ b/compass/deploy/ansible/openstack_juno/storage.yml @@ -0,0 +1,8 @@ +--- +- hosts: all + remote_user: vagrant + sudo: True + roles: + - repo + - common + - cinder-volume diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf index 8362b9a..bb9d66c 100644 --- a/compass/deploy/conf/base.conf +++ b/compass/deploy/conf/base.conf @@ -1,3 +1,4 @@ +export COMPASS_SERVER=10.1.0.12 export COMPASS_SERVER_URL="http://10.1.0.12/api" export COMPASS_USER_EMAIL="admin@huawei.com" export COMPASS_USER_PASSWORD="admin" diff --git a/compass/deploy/conf/cluster.conf b/compass/deploy/conf/cluster.conf new file mode 100644 index 0000000..4f43027 --- /dev/null +++ b/compass/deploy/conf/cluster.conf @@ -0,0 +1,20 @@ +export VIRT_NUMBER=5 +export VIRT_CPUS=4 +export VIRT_MEM=16384 +export VIRT_DISK=30G +export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' +#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' +export ADAPTER_NAME="openstack_juno" +export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$" +export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes" +export HOSTNAMES="host1,host2,host3,host4,host5" +export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute" +export DEFAULT_ROLES="" +export SWITCH_IPS="1.1.1.1" +export SWITCH_CREDENTIAL="version=2c,community=public" +export DEPLOYMENT_TIMEOUT="150" +export POLL_SWITCHES_FLAG="nopoll_switches" +export DASHBOARD_URL="" +export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source ${REGTEST_DIR}/base.conf +export VIP="10.1.0.222" diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf index e63e514..32981aa 100644 --- 
a/compass/deploy/conf/five.conf +++ b/compass/deploy/conf/five.conf @@ -1,6 +1,6 @@ export VIRT_NUMBER=5 export VIRT_CPUS=4 -export VIRT_MEM=4096 +export VIRT_MEM=16384 export VIRT_DISK=30G export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' #export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh index 18857cd..41ef209 100644 --- a/compass/deploy/deploy-vm.sh +++ b/compass/deploy/deploy-vm.sh @@ -16,6 +16,12 @@ fi cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py #source ../compass-install/ci/allinone.conf +/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \ + "ssh root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks" vagrant + +/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \ + "scp -r ${SCRIPT_DIR}/../deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py" \ + vagrant bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \ --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \ --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \ @@ -32,7 +38,8 @@ bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_ --network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \ --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \ --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \ ---deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" +--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \ +--cluster_vip="${VIP}" deploy_result=$? tear_down_machines cd ../compass-install diff --git a/compass/deploy/remote_excute.exp b/compass/deploy/remote_excute.exp new file mode 100644 index 0000000..9dd112b --- /dev/null +++ b/compass/deploy/remote_excute.exp @@ -0,0 +1,23 @@ +#!/usr/bin/expect + +set command [lindex $argv 0] +set passwd [lindex $argv 1] + +eval spawn "$command" +set timeout 60 + +expect { + -re ".*es.*o.*" + { + exp_send "yes\r" + exp_continue + } + + -re ".*sword:" { + exp_send "$passwd\r" + + } + +} + +interact diff --git a/compass/deploy/status_callback.py b/compass/deploy/status_callback.py new file mode 100644 index 0000000..8619132 --- /dev/null +++ b/compass/deploy/status_callback.py @@ -0,0 +1,174 @@ +# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com> + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
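The expect wrapper above (remote_excute.exp) answers the SSH host-key and password prompts for the two commands deploy-vm.sh runs against the Compass server. A rough Python equivalent, assuming the third-party pexpect package (which the patch itself does not use):

# Rough pexpect equivalent of remote_excute.exp; an illustration only.
import sys
import pexpect

command, passwd = sys.argv[1], sys.argv[2]
child = pexpect.spawn(command, timeout=60, encoding="utf-8")
while True:
    index = child.expect([r"\(yes/no\)\?", r"[Pp]assword:",
                          pexpect.EOF, pexpect.TIMEOUT])
    if index == 0:
        child.sendline("yes")     # accept the unknown SSH host key
    elif index == 1:
        child.sendline(passwd)    # supply the SSH password
    else:
        break                     # EOF or timeout: command finished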
+ +import httplib +import json +import sys +import logging + +def task_error(host, data): + logging.info("task_error: host=%s,data=%s" % (host, data)) + + if type(data) == dict: + invocation = data.pop('invocation', {}) + + notify_host("localhost", host, "failed") + +class CallbackModule(object): + """ + logs playbook results, per host, in /var/log/ansible/hosts + """ + + def on_any(self, *args, **kwargs): + pass + + def runner_on_failed(self, host, res, ignore_errors=False): + task_error(host, res) + + def runner_on_ok(self, host, res): + pass + + def runner_on_skipped(self, host, item=None): + pass + + def runner_on_unreachable(self, host, res): + pass + + def runner_on_no_hosts(self): + pass + + def runner_on_async_poll(self, host, res, jid, clock): + pass + + def runner_on_async_ok(self, host, res, jid): + pass + + def runner_on_async_failed(self, host, res, jid): + task_error(host, res) + + def playbook_on_start(self): + pass + + def playbook_on_notify(self, host, handler): + pass + + def playbook_on_no_hosts_matched(self): + pass + + def playbook_on_no_hosts_remaining(self): + pass + + def playbook_on_task_start(self, name, is_conditional): + pass + + def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + pass + + def playbook_on_setup(self): + pass + + def playbook_on_import_for_host(self, host, imported_file): + pass + + def playbook_on_not_import_for_host(self, host, missing_file): + pass + + def playbook_on_play_start(self, name): + pass + + def playbook_on_stats(self, stats): + logging.info("playbook_on_stats enter") + hosts = sorted(stats.processed.keys()) + host_vars = self.playbook.inventory.get_variables(hosts[0]) + cluster_name = host_vars['cluster_name'] + failures = False + unreachable = False + + for host in hosts: + summary = stats.summarize(host) + + if summary['failures'] > 0: + failures = True + if summary['unreachable'] > 0: + unreachable = True + + if failures or unreachable: + for host in hosts: + notify_host("localhost", host, "error") + return + + for host in hosts: + clusterhost_name = host + "." 
+ cluster_name + notify_host("localhost", clusterhost_name, "succ") + + +def raise_for_status(resp): + if resp.status < 200 or resp.status >= 300: + raise RuntimeError("%s, %s, %s" % (resp.status, resp.reason, resp.read())) + +def auth(conn): + credential = {} + credential['email'] = "admin@huawei.com" + credential['password'] = "admin" + url = "/api/users/token" + headers = {"Content-type": "application/json", + "Accept": "*/*"} + conn.request("POST", url, json.dumps(credential), headers) + resp = conn.getresponse() + + raise_for_status(resp) + return json.loads(resp.read())["token"] + +def notify_host(compass_host, host, status): + if status == "succ": + body = {"ready": True} + url = "/api/clusterhosts/%s/state_internal" % host + elif status == "error": + body = {"state": "ERROR"} + host = host.strip("host") + url = "/api/clusterhosts/%s/state" % host + else: + logging.error("notify_host: host %s with status %s is not supported" \ + % (host, status)) + return + + headers = {"Content-type": "application/json", + "Accept": "*/*"} + + conn = httplib.HTTPConnection(compass_host, 80) + token = auth(conn) + headers["X-Auth-Token"] = token + logging.info("host=%s,url=%s,body=%s,headers=%s" % (compass_host,url,json.dumps(body),headers)) + conn.request("POST", url, json.dumps(body), headers) + resp = conn.getresponse() + try: + raise_for_status(resp) + logging.info("notify host status success!!! status=%s, body=%s" % (resp.status, resp.read())) + except Exception as e: + logging.error("http request failed %s" % str(e)) + raise + finally: + conn.close() + +if __name__ == "__main__": + if len(sys.argv) != 3: + logging.error("params: host and status are needed") + sys.exit(1) + + host = sys.argv[1] + status = sys.argv[2] + notify_host(host, status)
Functionality added that allows reaping of Fuel configuration from an existing environment and create DHA and DEA configuration files from it JIRA: [BGS-2] Create Fuel deployment script Change-Id: Ia22ae9b050085aaa4cadb4ee6c7bfd556c4bc18a Signed-off-by: Szilard Cserey --- fuel/deploy/README.txt | 106 ++- .../conf/ericsson_montreal_lab/ha/dea.yaml | 993 +++++++++++++++++++++ .../conf/ericsson_montreal_lab/ha/dha.yaml | 54 ++ .../conf/ericsson_montreal_lab/multinode/dea.yaml | 987 ++++++++++++++++++++ .../conf/ericsson_montreal_lab/multinode/dha.yaml | 54 ++ .../conf/linux_foundation_lab/ha/dea.yaml | 950 ++++++++++++++++++++ .../conf/linux_foundation_lab/ha/dha.yaml | 49 + .../conf/linux_foundation_lab/multinode/dea.yaml | 950 ++++++++++++++++++++ .../conf/linux_foundation_lab/multinode/dha.yaml | 49 + fuel/deploy/baremetal/dea.yaml | 982 -------------------- fuel/deploy/baremetal/dha.yaml | 53 -- fuel/deploy/baremetal/vm/vFuel | 87 -- fuel/deploy/baremetal/vms/fuel.xml | 87 ++ fuel/deploy/baremetal/vms/fuel_lf.xml | 93 ++ fuel/deploy/cloud/configure_nodes.py | 6 +- fuel/deploy/cloud/deploy.py | 44 +- fuel/deploy/cloud/deployment.py | 12 +- fuel/deploy/common.py | 21 + fuel/deploy/dea.py | 2 + fuel/deploy/deploy.py | 150 ++-- fuel/deploy/deploy_env.py | 6 +- fuel/deploy/dha_adapters/hardware_adapter.py | 9 +- fuel/deploy/dha_adapters/hp_adapter.py | 4 +- fuel/deploy/dha_adapters/ipmi_adapter.py | 50 +- fuel/deploy/dha_adapters/libvirt_adapter.py | 6 - fuel/deploy/environments/__init__.py | 1 + fuel/deploy/environments/execution_environment.py | 67 ++ fuel/deploy/environments/libvirt_environment.py | 93 ++ fuel/deploy/environments/virtual_fuel.py | 60 ++ fuel/deploy/install-ubuntu-packages.sh | 18 - fuel/deploy/install_fuel_master.py | 33 +- fuel/deploy/libvirt/conf/ha/dea.yaml | 976 ++++++++++++++++++++ fuel/deploy/libvirt/conf/ha/dha.yaml | 42 + fuel/deploy/libvirt/conf/multinode/dea.yaml | 976 ++++++++++++++++++++ fuel/deploy/libvirt/conf/multinode/dha.yaml | 42 + fuel/deploy/libvirt/dea.yaml | 976 -------------------- fuel/deploy/libvirt/dha.yaml | 80 -- fuel/deploy/libvirt/networks/fuel1 | 12 - fuel/deploy/libvirt/networks/fuel1.xml | 12 + fuel/deploy/libvirt/networks/fuel2 | 5 - fuel/deploy/libvirt/networks/fuel2.xml | 5 + fuel/deploy/libvirt/networks/fuel3 | 5 - fuel/deploy/libvirt/networks/fuel3.xml | 5 + fuel/deploy/libvirt/networks/fuel4 | 12 - fuel/deploy/libvirt/networks/fuel4.xml | 12 + fuel/deploy/libvirt/vms/compute | 91 -- fuel/deploy/libvirt/vms/compute.xml | 91 ++ fuel/deploy/libvirt/vms/controller | 90 -- fuel/deploy/libvirt/vms/controller.xml | 90 ++ fuel/deploy/libvirt/vms/fuel-master | 93 -- fuel/deploy/libvirt/vms/fuel.xml | 93 ++ fuel/deploy/reap.py | 330 +++++++ fuel/deploy/setup_environment.py | 165 ---- fuel/deploy/setup_execution_environment.py | 36 + fuel/deploy/setup_vfuel.py | 143 --- fuel/deploy/ssh_client.py | 10 +- 56 files changed, 7469 insertions(+), 2999 deletions(-) create mode 100644 fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml create mode 100644 fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml create mode 100644 fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml create mode 100644 fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml create mode 100644 fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml create mode 100644 fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml create mode 100644 fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml create mode 100644 
fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml delete mode 100644 fuel/deploy/baremetal/dea.yaml delete mode 100644 fuel/deploy/baremetal/dha.yaml delete mode 100644 fuel/deploy/baremetal/vm/vFuel create mode 100644 fuel/deploy/baremetal/vms/fuel.xml create mode 100644 fuel/deploy/baremetal/vms/fuel_lf.xml create mode 100644 fuel/deploy/environments/__init__.py create mode 100644 fuel/deploy/environments/execution_environment.py create mode 100644 fuel/deploy/environments/libvirt_environment.py create mode 100644 fuel/deploy/environments/virtual_fuel.py delete mode 100755 fuel/deploy/install-ubuntu-packages.sh create mode 100644 fuel/deploy/libvirt/conf/ha/dea.yaml create mode 100644 fuel/deploy/libvirt/conf/ha/dha.yaml create mode 100644 fuel/deploy/libvirt/conf/multinode/dea.yaml create mode 100644 fuel/deploy/libvirt/conf/multinode/dha.yaml delete mode 100644 fuel/deploy/libvirt/dea.yaml delete mode 100644 fuel/deploy/libvirt/dha.yaml delete mode 100644 fuel/deploy/libvirt/networks/fuel1 create mode 100644 fuel/deploy/libvirt/networks/fuel1.xml delete mode 100644 fuel/deploy/libvirt/networks/fuel2 create mode 100644 fuel/deploy/libvirt/networks/fuel2.xml delete mode 100644 fuel/deploy/libvirt/networks/fuel3 create mode 100644 fuel/deploy/libvirt/networks/fuel3.xml delete mode 100644 fuel/deploy/libvirt/networks/fuel4 create mode 100644 fuel/deploy/libvirt/networks/fuel4.xml delete mode 100644 fuel/deploy/libvirt/vms/compute create mode 100644 fuel/deploy/libvirt/vms/compute.xml delete mode 100644 fuel/deploy/libvirt/vms/controller create mode 100644 fuel/deploy/libvirt/vms/controller.xml delete mode 100644 fuel/deploy/libvirt/vms/fuel-master create mode 100644 fuel/deploy/libvirt/vms/fuel.xml create mode 100644 fuel/deploy/reap.py delete mode 100644 fuel/deploy/setup_environment.py create mode 100644 fuel/deploy/setup_execution_environment.py delete mode 100644 fuel/deploy/setup_vfuel.py diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt index d392f8f..6f322d0 100644 --- a/fuel/deploy/README.txt +++ b/fuel/deploy/README.txt @@ -1,71 +1,109 @@ -======== How to prepare and run the OPNFV Autodeployment ======= +======== PREREQUISITES ======== -in fuel/build/deploy run these: +The following applications and Python modules are required to be installed: +- example for Ubuntu environment: +sudo apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager +sshpass fuseiso genisoimage blackbox xterm python-pip +sudo restart libvirt-bin +sudo pip install pyyaml netaddr paramiko lxml scp ---- Step.1 Install prerequisites -sudo ./install-ubuntu-packages.sh +======== PREPARE and RUN the OPNFV Autodeployment ======== +--- Step.1 Prepare the DEA and DHA configuration files and the OPNFV ISO file +Make sure that you are using the right DEA - Deployment Environment Adapter and +DHA - Deployment Hardware Adapter configuration files; the ones provided are only templates +that you will have to modify according to your needs +- If you wish to deploy OPNFV cloud environment on top of KVM/Libvirt + virtualization, use as example the following configuration files: ---- Step.2-A If wou want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization - run the following environment setup script + => libvirt/conf/ha + dea.yaml + dha.yaml -sudo python setup_environment.py + => libvirt/conf/multinode + dea.yaml + dha.yaml -Example: - sudo python setup_environment.py /mnt/images dha.yaml +- If you wish to deploy OPNFV cloud environment on baremetal + use as example the
following configuration files: + + => baremetal/conf/ericsson_montreal_lab/ha + dea.yaml + dha.yaml + => baremetal/conf/ericsson_montreal_lab/multinode + dea.yaml + dha.yaml + => baremetal/conf/linux_foundation_lab/ha + dea.yaml + dha.yaml + => baremetal/conf/linux_foundation_lab/multinode + dea.yaml + dha.yaml ---- Step.2-B If you want to deploy OPNFV cloud environment on baremetal run the - following environment setup script -sudo python setup_vfuel.py +--- Step.2 Run Autodeployment: -Example: - sudo python setup_vfuel.py /mnt/images dha.yaml +usage: python deploy.py [-h] [-nf] + [iso_file] dea_file dha_file [storage_dir] + [pxe_bridge] +positional arguments: + iso_file ISO File [default: OPNFV.iso] + dea_file Deployment Environment Adapter: dea.yaml + dha_file Deployment Hardware Adapter: dha.yaml + storage_dir Storage Directory [default: images] + pxe_bridge Linux Bridge for booting up the Fuel Master VM [default: pxebr] -WARNING!: -setup_vfuel.py adds the following snippet into /etc/network/interfaces -making sure to replace in setup_vfuel.py interfafe 'p1p1.20' with your actual outbound -interface in order to provide network access to the Fuel master for DNS and NTP. +optional arguments: + -h, --help show this help message and exit + -nf Do not install Fuel Master (and Node VMs when using libvirt) -iface vfuelnet inet static - bridge_ports em1 - address 10.40.0.1 - netmask 255.255.255.0 - pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet" - pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet" - post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet" - post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet" +* WARNING: +If <storage_dir> is not specified, Autodeployment will use +"<current_working_dir>/images" as default, and it will create it, +if it hasn't been created before +If <pxe_bridge> is not specified, Autodeployment will use "pxebr" as default, +if the bridge does not exist, the application will terminate with an error message +If <iso_file> is not specified, Autodeployment will use "<current_working_dir>/OPNFV.iso" +as default, if the iso file does not exist, the application will terminate with an error message +<pxe_bridge> is not required for Autodeployment in virtual environment, even if it is specified +it will not be used at all ---- Step.3 Start Autodeployment -Make sure you use the right Deployment Environment Adapter and -Deployment Hardware Adaper configuration files: - - for baremetal: baremetal/dea.yaml baremetal/dha.yaml +* EXAMPLES: - - for libvirt: libvirt/dea.yaml libvirt/dha.yaml +- Install Fuel Master and deploy OPNFV Cloud from scratch on Baremetal Environment +sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml /mnt/images pxebr -sudo python deploy.py [-nf] -Example: - sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml +- Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment + +sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml /mnt/images + + + +- Deploy OPNFV Cloud on an already active Environment where Fuel Master VM is running + so there is no need to install Fuel again + +sudo python deploy.py -nf ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml + +sudo python deploy.py -nf ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml
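Step.1 above pairs a DEA file with a DHA file; both are plain YAML (pyyaml is installed in the prerequisites step), and the full files follow below. A minimal sketch, not the actual deploy.py, of loading such a pair and inspecting it:

# Minimal sketch: load a DEA/DHA pair such as the ones under
# baremetal/conf/ or libvirt/conf/ shown in this patch.
import yaml

with open("dea.yaml") as f:
    dea = yaml.safe_load(f)
with open("dha.yaml") as f:
    dha = yaml.safe_load(f)

print(dea["environment_name"], dea["environment_mode"])
for node in dea["nodes"]:          # node id / role pairs defined in the DEA
    print(node["id"], node["role"])
print(dha["adapter"])              # e.g. 'hp' for the Montreal lab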
b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml new file mode 100644 index 0000000..dc8014d --- /dev/null +++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dea.yaml @@ -0,0 +1,993 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Tue May 5 15:33:07 UTC 2015 +comment: Test environment Ericsson Montreal +environment_name: opnfv +environment_mode: ha +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 3 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 6 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.40.0.2 + netmask: 255.255.255.0 + dhcp_pool_start: 10.40.0.3 + dhcp_pool_end: 10.40.0.254 + DNS_UPSTREAM: 10.118.32.193 + DNS_DOMAIN: opnfvericsson.ca + DNS_SEARCH: opnfvericsson.ca + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv + NTP1: 10.118.34.219 + NTP2: + NTP3: +interfaces: + interfaces_1: + eth0: + - fuelweb_admin + eth2: + - public + - management + - storage + - private +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-eth4 + - action: add-port + bridge: br-eth4 + name: eth4 + - action: add-br + name: br-eth5 + - action: add-port + bridge: br-eth5 + name: eth5 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-storage + tags: + - 220 + - 0 + vlan_ids: + - 220 + - 0 + - action: add-patch + bridges: + - br-eth2 + - br-mgmt + tags: + - 320 + - 0 + vlan_ids: + - 320 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + - action: add-patch + bridges: + - br-eth2 + - br-ex + tags: + - 20 + - 0 + vlan_ids: + - 20 + - 0 + - action: add-patch + bridges: + - br-eth2 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-eth4 + - action: add-port + bridge: br-eth4 + name: eth4 + - action: add-br + name: br-eth5 + - action: add-port + bridge: br-eth5 + name: eth5 + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-storage + tags: + - 220 + - 0 + vlan_ids: + - 220 + - 0 + - action: add-patch + bridges: + - br-eth2 + - br-mgmt + tags: + - 320 + - 0 
+ vlan_ids: + - 320 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + - action: add-patch + bridges: + - br-eth2 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 10.118.32.193 + floating_ranges: + - - 10.118.34.226 + - 10.118.34.230 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 2022 + - 2023 + networks: + - cidr: 10.118.34.192/24 + gateway: 10.118.34.193 + ip_ranges: + - - 10.118.34.220 + - 10.118.34.225 + meta: + assign_vip: true + cidr: 10.118.34.192/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 10.118.34.220 + - 10.118.34.225 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 320 + name: management + vlan_start: 320 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 220 + name: storage + vlan_start: 220 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.40.0.0/24 + gateway: null + ip_ranges: + - - 10.40.0.3 + - 10.40.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - 
cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. 
+ label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. + type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 10.118.32.193 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 10.118.34.219 + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. + label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 
10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. + label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. 
+ label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. + label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. 
+ label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml new file mode 100644 index 0000000..562d6cd --- /dev/null +++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/ha/dha.yaml @@ -0,0 +1,54 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Mon May 4 09:03:46 UTC 2015 +comment: Test environment Ericsson Montreal + +# Adapter to use for this definition +adapter: hp + +# Node list. +# Mandatory property is id, all other properties are adapter specific. + +nodes: +- id: 1 + pxeMac: 14:58:D0:54:7A:D8 + ipmiIp: 10.118.32.198 + ipmiUser: + ipmiPass: +- id: 2 + pxeMac: 14:58:D0:55:E2:E0 + ipmiIp: 10.118.32.202 + ipmiUser: + ipmiPass: +- id: 3 + pxeMac: 9C:B6:54:8A:25:C0 + ipmiIp: 10.118.32.213 + ipmiUser: + ipmiPass: +- id: 4 + pxeMac: 14:58:D0:54:28:80 + ipmiIp: 10.118.32.201 + ipmiUser: + ipmiPass: +- id: 5 + pxeMac: 14:58:D0:54:E7:88 + ipmiIp: 10.118.32.203 + ipmiUser: + ipmiPass: +- id: 6 + pxeMac: 14:58:D0:54:7A:28 + ipmiIp: 10.118.32.205 + ipmiUser: + ipmiPass: +# Adding the Fuel node as node id 7 which may not be correct - please +# adjust as needed. 
+- id: 7 + libvirtName: vFuel + libvirtTemplate: baremetal/vms/fuel.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml new file mode 100644 index 0000000..328dd6b --- /dev/null +++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dea.yaml @@ -0,0 +1,987 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Tue May 5 15:33:07 UTC 2015 +comment: Test environment Ericsson Montreal +environment_name: opnfv +environment_mode: multinode +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 3 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 6 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.40.0.2 + netmask: 255.255.255.0 + dhcp_pool_start: 10.40.0.3 + dhcp_pool_end: 10.40.0.254 + DNS_UPSTREAM: 10.118.32.193 + DNS_DOMAIN: opnfvericsson.ca + DNS_SEARCH: opnfvericsson.ca + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv + NTP1: 10.118.34.219 + NTP2: + NTP3: +interfaces: + interfaces_1: + eth0: + - fuelweb_admin + eth2: + - public + - management + - storage + - private +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-eth4 + - action: add-port + bridge: br-eth4 + name: eth4 + - action: add-br + name: br-eth5 + - action: add-port + bridge: br-eth5 + name: eth5 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-storage + tags: + - 220 + - 0 + vlan_ids: + - 220 + - 0 + - action: add-patch + bridges: + - br-eth2 + - br-mgmt + tags: + - 320 + - 0 + vlan_ids: + - 320 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + - action: add-patch + bridges: + - br-eth2 + - br-ex + - action: add-patch + bridges: + - br-eth2 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-eth4 + - action: add-port + bridge: br-eth4 + name: eth4 + - action: add-br + name: br-eth5 + - action: add-port + bridge: br-eth5 + name: eth5 + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin 
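The DHA file above identifies each baremetal node by its ipmiIp/ipmiUser/ipmiPass entries; the adapter that drives them lives in fuel/deploy/dha_adapters/ipmi_adapter.py (see the diffstat, not shown in full here). An illustrative sketch of the kind of ipmitool call such an adapter issues:

# Illustrative only: query chassis power state over IPMI for one of the
# nodes listed in the DHA above.
import subprocess

def power_status(ipmi_ip, user, password):
    cmd = ["ipmitool", "-I", "lanplus", "-H", ipmi_ip,
           "-U", user, "-P", password, "chassis", "power", "status"]
    return subprocess.check_output(cmd).strip()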
+ - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-storage + tags: + - 220 + - 0 + vlan_ids: + - 220 + - 0 + - action: add-patch + bridges: + - br-eth2 + - br-mgmt + tags: + - 320 + - 0 + vlan_ids: + - 320 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + - action: add-patch + bridges: + - br-eth2 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 10.118.32.193 + floating_ranges: + - - 10.118.36.48 + - 10.118.36.62 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 2022 + - 2023 + networks: + - cidr: 10.118.36.32/27 + gateway: 10.118.36.1 + ip_ranges: + - - 10.118.36.33 + - 10.118.36.47 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 320 + name: management + vlan_start: 320 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 220 + name: storage + vlan_start: 220 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.40.0.0/24 + gateway: null + ip_ranges: + - - 10.40.0.3 + - 10.40.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: 
true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. 
+ label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. + type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 10.118.32.193 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 10.118.34.219 + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. + label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 
10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. + label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. 
+ label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. + label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. 
+ label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml new file mode 100644 index 0000000..562d6cd --- /dev/null +++ b/fuel/deploy/baremetal/conf/ericsson_montreal_lab/multinode/dha.yaml @@ -0,0 +1,54 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Mon May 4 09:03:46 UTC 2015 +comment: Test environment Ericsson Montreal + +# Adapter to use for this definition +adapter: hp + +# Node list. +# Mandatory property is id, all other properties are adapter specific. + +nodes: +- id: 1 + pxeMac: 14:58:D0:54:7A:D8 + ipmiIp: 10.118.32.198 + ipmiUser: + ipmiPass: +- id: 2 + pxeMac: 14:58:D0:55:E2:E0 + ipmiIp: 10.118.32.202 + ipmiUser: + ipmiPass: +- id: 3 + pxeMac: 9C:B6:54:8A:25:C0 + ipmiIp: 10.118.32.213 + ipmiUser: + ipmiPass: +- id: 4 + pxeMac: 14:58:D0:54:28:80 + ipmiIp: 10.118.32.201 + ipmiUser: + ipmiPass: +- id: 5 + pxeMac: 14:58:D0:54:E7:88 + ipmiIp: 10.118.32.203 + ipmiUser: + ipmiPass: +- id: 6 + pxeMac: 14:58:D0:54:7A:28 + ipmiIp: 10.118.32.205 + ipmiUser: + ipmiPass: +# Adding the Fuel node as node id 7 which may not be correct - please +# adjust as needed. 
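+# Note: unlike the baremetal nodes above, the vFuel entry below is not
+# addressed over IPMI. isFuel marks it as the Fuel master, which is booted
+# as a libvirt VM from the referenced template; root/r00tme is the stock
+# Fuel master login.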
+- id: 7 + libvirtName: vFuel + libvirtTemplate: baremetal/vms/fuel.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml new file mode 100644 index 0000000..2528229 --- /dev/null +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml @@ -0,0 +1,950 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Thu May 21 13:34:13 CEST 2015 +comment: HA deployment with Ceph +environment_name: opnfv +environment_mode: ha +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 3 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.20.0.2 + netmask: 255.255.0.0 + dhcp_pool_start: 10.20.0.3 + dhcp_pool_end: 10.20.0.254 + DNS_UPSTREAM: 8.8.8.8 + DNS_DOMAIN: domain.tld + DNS_SEARCH: domain.tld + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv + NTP1: 0.pool.ntp.org + NTP2: 1.pool.ntp.org + NTP3: 2.pool.ntp.org +interfaces: + interfaces_1: + eth0: + - public + eth1: + - fuelweb_admin + - management + - storage + - private +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 301 + - 0 + vlan_ids: + - 301 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-mgmt + tags: + - 300 + - 0 + vlan_ids: + - 300 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-fw-admin + trunks: + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-ex + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth1 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 301 + - 0 + vlan_ids: + - 301 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-mgmt + tags: + - 300 + - 0 + vlan_ids: + - 300 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-fw-admin + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth1 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.30.9.80 + - 172.30.9.89 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 1000 + - 1010 + networks: + - cidr: 
172.30.9.0/24 + gateway: 172.30.9.1 + ip_ranges: + - - 172.30.9.70 + - 172.30.9.70 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 300 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 301 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.20.0.0/16 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.255.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 
'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. 
+ type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. + label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 
10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. + label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. 
+ label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. + label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. 
+ label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml new file mode 100644 index 0000000..5acd389 --- /dev/null +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dha.yaml @@ -0,0 +1,49 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Fri May 8 08:03:49 UTC 2015 +comment: Config for LF Pod1 + +# Adapter to use for this definition +adapter: ipmi + +# Node list. +# Mandatory property is id, all other properties are adapter specific. + +nodes: +- id: 1 + pxeMac: 00:25:b5:b0:00:ef + ipmiIp: 172.30.8.69 + ipmiUser: admin + ipmiPass: octopus +- id: 2 + pxeMac: 00:25:b5:b0:00:cf + ipmiIp: 172.30.8.78 + ipmiUser: admin + ipmiPass: octopus +- id: 3 + pxeMac: 00:25:b5:b0:00:8f + ipmiIp: 172.30.8.68 + ipmiUser: admin + ipmiPass: octopus +- id: 4 + pxeMac: 00:25:b5:b0:00:6f + ipmiIp: 172.30.8.77 + ipmiUser: admin + ipmiPass: octopus +- id: 5 + pxeMac: 00:25:b5:b0:00:4f + ipmiIp: 172.30.8.67 + ipmiUser: admin + ipmiPass: octopus +# Adding the Fuel node as node id 6 which may not be correct - please +# adjust as needed. 
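+# As in the Ericsson lab configs, the vFuel entry below is the Fuel master,
+# booted as a libvirt VM (fuel_lf.xml) rather than controlled through IPMI;
+# the disks section sizes its volume at 30G.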
+- id: 6 + libvirtName: vFuel + libvirtTemplate: baremetal/vms/fuel_lf.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml new file mode 100644 index 0000000..2387443 --- /dev/null +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml @@ -0,0 +1,950 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Tue May 5 15:33:07 UTC 2015 +comment: Multinode deployment with Ceph +environment_name: opnfv +environment_mode: multinode +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 3 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.20.0.2 + netmask: 255.255.0.0 + dhcp_pool_start: 10.20.0.3 + dhcp_pool_end: 10.20.0.254 + DNS_UPSTREAM: 8.8.8.8 + DNS_DOMAIN: domain.tld + DNS_SEARCH: domain.tld + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv + NTP1: 0.pool.ntp.org + NTP2: 1.pool.ntp.org + NTP3: 2.pool.ntp.org +interfaces: + interfaces_1: + eth0: + - public + eth1: + - fuelweb_admin + - management + - storage + - private +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 301 + - 0 + vlan_ids: + - 301 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-mgmt + tags: + - 300 + - 0 + vlan_ids: + - 300 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-fw-admin + trunks: + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-ex + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth1 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 301 + - 0 + vlan_ids: + - 301 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-mgmt + tags: + - 300 + - 0 + vlan_ids: + - 300 + - 0 + - action: add-patch + bridges: + - br-eth1 + - br-fw-admin + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth1 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.30.9.80 + - 172.30.9.89 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + -
1000 + - 1010 + networks: + - cidr: 172.30.9.0/24 + gateway: 172.30.9.1 + ip_ranges: + - - 172.30.9.70 + - 172.30.9.70 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 300 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 301 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.20.0.0/16 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.255.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + 
restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. 
+ type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. + label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 
10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. + label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. 
+ label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. + label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. 
+ label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml new file mode 100644 index 0000000..5acd389 --- /dev/null +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dha.yaml @@ -0,0 +1,49 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Fri May 8 08:03:49 UTC 2015 +comment: Config for LF Pod1 + +# Adapter to use for this definition +adapter: ipmi + +# Node list. +# Mandatory property is id, all other properties are adapter specific. + +nodes: +- id: 1 + pxeMac: 00:25:b5:b0:00:ef + ipmiIp: 172.30.8.69 + ipmiUser: admin + ipmiPass: octopus +- id: 2 + pxeMac: 00:25:b5:b0:00:cf + ipmiIp: 172.30.8.78 + ipmiUser: admin + ipmiPass: octopus +- id: 3 + pxeMac: 00:25:b5:b0:00:8f + ipmiIp: 172.30.8.68 + ipmiUser: admin + ipmiPass: octopus +- id: 4 + pxeMac: 00:25:b5:b0:00:6f + ipmiIp: 172.30.8.77 + ipmiUser: admin + ipmiPass: octopus +- id: 5 + pxeMac: 00:25:b5:b0:00:4f + ipmiIp: 172.30.8.67 + ipmiUser: admin + ipmiPass: octopus +# Adding the Fuel node as node id 6 which may not be correct - please +# adjust as needed. 
+- id: 6 + libvirtName: vFuel + libvirtTemplate: baremetal/vms/fuel_lf.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G \ No newline at end of file diff --git a/fuel/deploy/baremetal/dea.yaml b/fuel/deploy/baremetal/dea.yaml deleted file mode 100644 index eb3019c..0000000 --- a/fuel/deploy/baremetal/dea.yaml +++ /dev/null @@ -1,982 +0,0 @@ -title: Deployment Environment Adapter (DEA) -# DEA API version supported -version: 1.1 -created: Tue May 5 15:33:07 UTC 2015 -comment: Test environment Ericsson Montreal -environment_name: opnfv -environment_mode: multinode -wanted_release: Juno on Ubuntu 12.04.4 -nodes: -- id: 1 - interfaces: interface1 - transformations: controller1 - role: controller -- id: 2 - interfaces: interface1 - transformations: compute1 - role: compute -fuel: - ADMIN_NETWORK: - ipaddress: 10.40.0.2 - netmask: 255.255.255.0 - dhcp_pool_start: 10.40.0.3 - dhcp_pool_end: 10.40.0.254 - DNS_UPSTREAM: 10.118.32.193 - DNS_DOMAIN: opnfvericsson.ca - DNS_SEARCH: opnfvericsson.ca - FUEL_ACCESS: - user: admin - password: admin - HOSTNAME: opnfv - NTP1: 0.ca.pool.ntp.org - NTP2: 1.ca.pool.ntp.org - NTP3: 2.ca.pool.ntp.org -interfaces: - interface1: - eth0: - - fuelweb_admin - eth2: - - public - - management - - storage - - private -transformations: - controller1: - - action: add-br - name: br-eth0 - - action: add-port - bridge: br-eth0 - name: eth0 - - action: add-br - name: br-eth1 - - action: add-port - bridge: br-eth1 - name: eth1 - - action: add-br - name: br-eth2 - - action: add-port - bridge: br-eth2 - name: eth2 - - action: add-br - name: br-eth3 - - action: add-port - bridge: br-eth3 - name: eth3 - - action: add-br - name: br-eth4 - - action: add-port - bridge: br-eth4 - name: eth4 - - action: add-br - name: br-eth5 - - action: add-port - bridge: br-eth5 - name: eth5 - - action: add-br - name: br-ex - - action: add-br - name: br-mgmt - - action: add-br - name: br-storage - - action: add-br - name: br-fw-admin - - action: add-patch - bridges: - - br-eth2 - - br-storage - tags: - - 220 - - 0 - vlan_ids: - - 220 - - 0 - - action: add-patch - bridges: - - br-eth2 - - br-mgmt - tags: - - 320 - - 0 - vlan_ids: - - 320 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-fw-admin - trunks: - - 0 - - action: add-patch - bridges: - - br-eth2 - - br-ex - tags: - - 120 - - 0 - vlan_ids: - - 120 - - 0 - - action: add-br - name: br-prv - - action: add-patch - bridges: - - br-eth2 - - br-prv - compute1: - - action: add-br - name: br-eth0 - - action: add-port - bridge: br-eth0 - name: eth0 - - action: add-br - name: br-eth1 - - action: add-port - bridge: br-eth1 - name: eth1 - - action: add-br - name: br-eth2 - - action: add-port - bridge: br-eth2 - name: eth2 - - action: add-br - name: br-eth3 - - action: add-port - bridge: br-eth3 - name: eth3 - - action: add-br - name: br-eth4 - - action: add-port - bridge: br-eth4 - name: eth4 - - action: add-br - name: br-eth5 - - action: add-port - bridge: br-eth5 - name: eth5 - - action: add-br - name: br-mgmt - - action: add-br - name: br-storage - - action: add-br - name: br-fw-admin - - action: add-patch - bridges: - - br-eth2 - - br-storage - tags: - - 220 - - 0 - vlan_ids: - - 220 - - 0 - - action: add-patch - bridges: - - br-eth2 - - br-mgmt - tags: - - 320 - - 0 - vlan_ids: - - 320 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-fw-admin - trunks: - - 0 - - action: add-br - name: br-prv - - action: add-patch - bridges: - - br-eth2 - - br-prv -opnfv: - compute: {} - controller: {} -network: - 
networking_parameters: - base_mac: fa:16:3e:00:00:00 - dns_nameservers: - - 10.118.32.193 - - 8.8.8.8 - floating_ranges: - - - 172.16.0.130 - - 172.16.0.254 - gre_id_range: - - 2 - - 65535 - internal_cidr: 192.168.111.0/24 - internal_gateway: 192.168.111.1 - net_l23_provider: ovs - segmentation_type: vlan - vlan_range: - - 2022 - - 2023 - networks: - - cidr: 172.16.0.0/24 - gateway: 172.16.0.1 - ip_ranges: - - - 172.16.0.2 - - 172.16.0.126 - meta: - assign_vip: true - cidr: 172.16.0.0/24 - configurable: true - floating_range_var: floating_ranges - ip_range: - - 172.16.0.2 - - 172.16.0.126 - map_priority: 1 - name: public - notation: ip_ranges - render_addr_mask: public - render_type: null - use_gateway: true - vlan_start: null - name: public - vlan_start: 120 - - cidr: 192.168.0.0/24 - gateway: null - ip_ranges: - - - 192.168.0.2 - - 192.168.0.254 - meta: - assign_vip: true - cidr: 192.168.0.0/24 - configurable: true - map_priority: 2 - name: management - notation: cidr - render_addr_mask: internal - render_type: cidr - use_gateway: false - vlan_start: 101 - name: management - vlan_start: 320 - - cidr: 192.168.1.0/24 - gateway: null - ip_ranges: - - - 192.168.1.2 - - 192.168.1.254 - meta: - assign_vip: false - cidr: 192.168.1.0/24 - configurable: true - map_priority: 2 - name: storage - notation: cidr - render_addr_mask: storage - render_type: cidr - use_gateway: false - vlan_start: 102 - name: storage - vlan_start: 220 - - cidr: null - gateway: null - ip_ranges: [] - meta: - assign_vip: false - configurable: false - map_priority: 2 - name: private - neutron_vlan_range: true - notation: null - render_addr_mask: null - render_type: null - seg_type: vlan - use_gateway: false - vlan_start: null - name: private - vlan_start: null - - cidr: 10.40.0.0/24 - gateway: null - ip_ranges: - - - 10.40.0.3 - - 10.40.0.254 - meta: - assign_vip: false - configurable: false - map_priority: 0 - notation: ip_ranges - render_addr_mask: null - render_type: null - unmovable: true - use_gateway: true - name: fuelweb_admin - vlan_start: null -settings: - editable: - access: - email: - description: Email address for Administrator - label: email - type: text - value: admin@localhost - weight: 40 - metadata: - label: Access - weight: 10 - password: - description: Password for Administrator - label: password - type: password - value: admin - weight: 20 - tenant: - description: Tenant (project) name for Administrator - label: tenant - regex: - error: Invalid tenant name - source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* - type: text - value: admin - weight: 30 - user: - description: Username for Administrator - label: username - regex: - error: Invalid username - source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* - type: text - value: admin - weight: 10 - additional_components: - ceilometer: - description: If selected, Ceilometer component will be installed - label: Install Ceilometer - type: checkbox - value: false - weight: 40 - heat: - description: '' - label: '' - type: hidden - value: true - weight: 30 - metadata: - label: Additional Components - weight: 20 - murano: - description: If selected, Murano component will be installed - label: Install Murano - restrictions: - - cluster:net_provider != 'neutron' - type: checkbox - value: false - weight: 20 - sahara: - description: If selected, Sahara component will be installed - label: Install Sahara - type: checkbox - value: false - weight: 10 
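The settings: editable: tree shown in these DEA files follows Fuel's attribute schema: every option carries a type, a value, a weight that orders it in the UI, and optional restrictions that hide or disable it depending on other choices. A minimal sketch of flipping one of these options outside the Fuel UI, assuming PyYAML and a local copy of the file (the path and the Ceilometer example are illustrative assumptions, not part of this patch):

    import yaml  # PyYAML, assumed available

    # Load a local copy of the DEA file (hypothetical path).
    with open('dea.yaml') as f:
        dea = yaml.safe_load(f)

    # Options live under settings -> editable -> <group> -> <option>.
    components = dea['settings']['editable']['additional_components']
    components['ceilometer']['value'] = True  # request Ceilometer installation

    with open('dea.yaml', 'w') as f:
        yaml.safe_dump(dea, f, default_flow_style=False)
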
- common: - auth_key: - description: Public key(s) to include in authorized_keys on deployed nodes - label: Public Key - type: text - value: '' - weight: 70 - auto_assign_floating_ip: - description: If selected, OpenStack will automatically assign a floating IP - to a new instance - label: Auto assign floating IP - restrictions: - - cluster:net_provider == 'neutron' - type: checkbox - value: false - weight: 40 - compute_scheduler_driver: - label: Scheduler driver - type: radio - value: nova.scheduler.filter_scheduler.FilterScheduler - values: - - data: nova.scheduler.filter_scheduler.FilterScheduler - description: Currently the most advanced OpenStack scheduler. See the OpenStack - documentation for details. - label: Filter scheduler - - data: nova.scheduler.simple.SimpleScheduler - description: This is 'naive' scheduler which tries to find the least loaded - host - label: Simple scheduler - weight: 40 - debug: - description: Debug logging mode provides more information, but requires more - disk space. - label: OpenStack debug logging - type: checkbox - value: false - weight: 20 - disable_offload: - description: If set, generic segmentation offload (gso) and generic receive - offload (gro) on physical nics will be disabled. See ethtool man. - label: Disable generic offload on physical nics - restrictions: - - action: hide - condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type - == 'gre' - type: checkbox - value: true - weight: 80 - libvirt_type: - label: Hypervisor type - type: radio - value: kvm - values: - - data: kvm - description: Choose this type of hypervisor if you run OpenStack on hardware - label: KVM - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - - data: qemu - description: Choose this type of hypervisor if you run OpenStack on virtual - hosts. - label: QEMU - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - - data: vcenter - description: Choose this type of hypervisor if you run OpenStack in a vCenter - environment. - label: vCenter - restrictions: - - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider - == 'neutron' - weight: 30 - metadata: - label: Common - weight: 30 - nova_quota: - description: Quotas are used to limit CPU and memory usage for tenants. Enabling - quotas will increase load on the Nova database. - label: Nova quotas - type: checkbox - value: false - weight: 25 - resume_guests_state_on_host_boot: - description: Whether to resume previous guests state when the host reboots. - If enabled, this option causes guests assigned to the host to resume their - previous state. If the guest was running a restart will be attempted when - nova-compute starts. If the guest was not running previously, a restart - will not be attempted. - label: Resume guests state on host boot - type: checkbox - value: true - weight: 60 - use_cow_images: - description: For most cases you will want qcow format. If it's disabled, raw - image format will be used to run VMs. OpenStack with raw format currently - does not support snapshotting. - label: Use qcow format for images - type: checkbox - value: true - weight: 50 - corosync: - group: - description: '' - label: Group - type: text - value: 226.94.1.1 - weight: 10 - metadata: - label: Corosync - restrictions: - - action: hide - condition: 'true' - weight: 50 - port: - description: '' - label: Port - type: text - value: '12000' - weight: 20 - verified: - description: Set True only if multicast is configured correctly on router. 
- label: Need to pass network verification. - type: checkbox - value: false - weight: 10 - external_dns: - dns_list: - description: List of upstream DNS servers, separated by comma - label: DNS list - type: text - value: 10.118.32.193, 8.8.8.8 - weight: 10 - metadata: - label: Upstream DNS - weight: 90 - external_ntp: - metadata: - label: Upstream NTP - weight: 100 - ntp_list: - description: List of upstream NTP servers, separated by comma - label: NTP servers list - type: text - value: 0.pool.ntp.org, 1.pool.ntp.org - weight: 10 - kernel_params: - kernel: - description: Default kernel parameters - label: Initial parameters - type: text - value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset - weight: 45 - metadata: - label: Kernel parameters - weight: 40 - neutron_mellanox: - metadata: - enabled: true - label: Mellanox Neutron components - toggleable: false - weight: 50 - plugin: - label: Mellanox drivers and SR-IOV plugin - type: radio - value: disabled - values: - - data: disabled - description: If selected, Mellanox drivers, Neutron and Cinder plugin will - not be installed. - label: Mellanox drivers and plugins disabled - restrictions: - - settings:storage.iser.value == true - - data: drivers_only - description: If selected, Mellanox Ethernet drivers will be installed to - support networking over Mellanox NIC. Mellanox Neutron plugin will not - be installed. - label: Install only Mellanox drivers - restrictions: - - settings:common.libvirt_type.value != 'kvm' - - data: ethernet - description: If selected, both Mellanox Ethernet drivers and Mellanox network - acceleration (Neutron) plugin will be installed. - label: Install Mellanox drivers and SR-IOV plugin - restrictions: - - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider - == 'neutron' and networking_parameters:segmentation_type == 'vlan') - weight: 60 - vf_num: - description: Note that one virtual function will be reserved to the storage - network, in case of choosing iSER. - label: Number of virtual NICs - restrictions: - - settings:neutron_mellanox.plugin.value != 'ethernet' - type: text - value: '16' - weight: 70 - nsx_plugin: - connector_type: - description: Default network transport type to use - label: NSX connector type - type: select - value: stt - values: - - data: gre - label: GRE - - data: ipsec_gre - label: GRE over IPSec - - data: stt - label: STT - - data: ipsec_stt - label: STT over IPSec - - data: bridge - label: Bridge - weight: 80 - l3_gw_service_uuid: - description: UUID for the default L3 gateway service to use with this cluster - label: L3 service UUID - regex: - error: Invalid L3 gateway service UUID - source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' - type: text - value: '' - weight: 50 - metadata: - enabled: false - label: VMware NSX - restrictions: - - action: hide - condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider - != 'nsx' - weight: 20 - nsx_controllers: - description: One or more IPv4[:port] addresses of NSX controller node, separated - by comma (e.g. 
10.40.30.2,192.168.110.254:443) - label: NSX controller endpoint - regex: - error: Invalid controller endpoints, specify valid IPv4[:port] pair - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ - type: text - value: '' - weight: 60 - nsx_password: - description: Password for Administrator - label: NSX password - regex: - error: Empty password - source: \S - type: password - value: '' - weight: 30 - nsx_username: - description: NSX administrator's username - label: NSX username - regex: - error: Empty username - source: \S - type: text - value: admin - weight: 20 - packages_url: - description: URL to NSX specific packages - label: URL to NSX bits - regex: - error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. - http://10.20.0.2/nsx) - source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ - type: text - value: '' - weight: 70 - replication_mode: - description: '' - label: NSX cluster has Service nodes - type: checkbox - value: true - weight: 90 - transport_zone_uuid: - description: UUID of the pre-existing default NSX Transport zone - label: Transport zone UUID - regex: - error: Invalid transport zone UUID - source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' - type: text - value: '' - weight: 40 - provision: - metadata: - label: Provision - restrictions: - - action: hide - condition: not ('experimental' in version:feature_groups) - weight: 80 - method: - description: Which provision method to use for this cluster. - label: Provision method - type: radio - value: cobbler - values: - - data: image - description: Copying pre-built images on a disk. - label: Image - - data: cobbler - description: Install from scratch using anaconda or debian-installer. - label: Classic (use anaconda or debian-installer) - public_network_assignment: - assign_to_all_nodes: - description: When disabled, public network will be assigned to controllers - and zabbix-server only - label: Assign public network to all nodes - type: checkbox - value: false - weight: 10 - metadata: - label: Public network assignment - restrictions: - - action: hide - condition: cluster:net_provider != 'neutron' - weight: 50 - storage: - ephemeral_ceph: - description: Configures Nova to store ephemeral volumes in RBD. This works - best if Ceph is enabled for volumes and images, too. Enables live migration - of all types of Ceph backed VMs (without this option, live migration will - only work with VMs launched from Cinder volumes). - label: Ceph RBD for ephemeral volumes (Nova) - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - type: checkbox - value: false - weight: 75 - images_ceph: - description: Configures Glance to use the Ceph RBD backend to store images. - If enabled, this option will prevent Swift from installing. - label: Ceph RBD for images (Glance) - type: checkbox - value: false - weight: 30 - images_vcenter: - description: Configures Glance to use the vCenter/ESXi backend to store images. - If enabled, this option will prevent Swift from installing. 
- label: VMWare vCenter/ESXi datastore for images (Glance) - restrictions: - - settings:common.libvirt_type.value != 'vcenter' - type: checkbox - value: false - weight: 35 - iser: - description: 'High performance block storage: Cinder volumes over iSER protocol - (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, - and will use a dedicated virtual function for the storage network.' - label: iSER protocol for volumes (Cinder) - restrictions: - - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value - != 'kvm' - type: checkbox - value: false - weight: 11 - metadata: - label: Storage - weight: 60 - objects_ceph: - description: Configures RadosGW front end for Ceph RBD. This exposes S3 and - Swift API Interfaces. If enabled, this option will prevent Swift from installing. - label: Ceph RadosGW for objects (Swift API) - restrictions: - - settings:storage.images_ceph.value == false - type: checkbox - value: false - weight: 80 - osd_pool_size: - description: Configures the default number of object replicas in Ceph. This - number must be equal to or lower than the number of deployed 'Storage - - Ceph OSD' nodes. - label: Ceph object replication factor - regex: - error: Invalid number - source: ^[1-9]\d*$ - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - type: text - value: '2' - weight: 85 - vc_datacenter: - description: Inventory path to a datacenter. If you want to use ESXi host - as datastore, it should be "ha-datacenter". - label: Datacenter name - regex: - error: Empty datacenter - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 65 - vc_datastore: - description: Datastore associated with the datacenter. - label: Datastore name - regex: - error: Empty datastore - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 60 - vc_host: - description: IP Address of vCenter/ESXi - label: vCenter/ESXi IP - regex: - error: Specify valid IPv4 address - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 45 - vc_image_dir: - description: The name of the directory where the glance images will be stored - in the VMware datastore. 
- label: Datastore Images directory - regex: - error: Empty images directory - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: /openstack_glance - weight: 70 - vc_password: - description: vCenter/ESXi admin password - label: Password - regex: - error: Empty password - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: password - value: '' - weight: 55 - vc_user: - description: vCenter/ESXi admin username - label: Username - regex: - error: Empty username - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 50 - volumes_ceph: - description: Configures Cinder to store volumes in Ceph RBD images. - label: Ceph RBD for volumes (Cinder) - restrictions: - - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value - == 'vcenter' - type: checkbox - value: false - weight: 20 - volumes_lvm: - description: Requires at least one Storage - Cinder LVM node. - label: Cinder LVM over iSCSI for volumes - restrictions: - - settings:storage.volumes_ceph.value == true - type: checkbox - value: false - weight: 10 - volumes_vmdk: - description: Configures Cinder to store volumes via VMware vCenter. - label: VMware vCenter for volumes (Cinder) - restrictions: - - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value - == true - type: checkbox - value: false - weight: 15 - syslog: - metadata: - label: Syslog - weight: 50 - syslog_port: - description: Remote syslog port - label: Port - regex: - error: Invalid Syslog port - source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: text - value: '514' - weight: 20 - syslog_server: - description: Remote syslog hostname - label: Hostname - type: text - value: '' - weight: 10 - syslog_transport: - label: Syslog transport protocol - type: radio - value: tcp - values: - - data: udp - description: '' - label: UDP - - data: tcp - description: '' - label: TCP - weight: 30 - vcenter: - cluster: - description: vCenter cluster name. If you have multiple clusters, use comma - to separate names - label: Cluster - regex: - error: Invalid cluster list - source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ - type: text - value: '' - weight: 40 - datastore_regex: - description: The Datastore regexp setting specifies the data stores to use - with Compute. For example, "nas.*". 
If you want to use all available datastores, - leave this field blank - label: Datastore regexp - regex: - error: Invalid datastore regexp - source: ^(\S.*\S|\S|)$ - type: text - value: '' - weight: 50 - host_ip: - description: IP Address of vCenter - label: vCenter IP - regex: - error: Specify valid IPv4 address - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ - type: text - value: '' - weight: 10 - metadata: - label: vCenter - restrictions: - - action: hide - condition: settings:common.libvirt_type.value != 'vcenter' - weight: 20 - use_vcenter: - description: '' - label: '' - type: hidden - value: true - weight: 5 - vc_password: - description: vCenter admin password - label: Password - regex: - error: Empty password - source: \S - type: password - value: admin - weight: 30 - vc_user: - description: vCenter admin username - label: Username - regex: - error: Empty username - source: \S - type: text - value: admin - weight: 20 - vlan_interface: - description: Physical ESXi host ethernet adapter for VLAN networking (e.g. - vmnic1). If empty "vmnic0" is used by default - label: ESXi VLAN interface - restrictions: - - action: hide - condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager - != 'VlanManager' - type: text - value: '' - weight: 60 - zabbix: - metadata: - label: Zabbix Access - restrictions: - - action: hide - condition: not ('experimental' in version:feature_groups) - weight: 70 - password: - description: Password for Zabbix Administrator - label: password - type: password - value: zabbix - weight: 20 - username: - description: Username for Zabbix Administrator - label: username - type: text - value: admin - weight: 10 diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml deleted file mode 100644 index 6240f07..0000000 --- a/fuel/deploy/baremetal/dha.yaml +++ /dev/null @@ -1,53 +0,0 @@ -title: Deployment Hardware Adapter (DHA) -# DHA API version supported -version: 1.1 -created: Mon May 4 09:03:46 UTC 2015 -comment: Test environment Ericsson Montreal - -# Adapter to use for this definition -adapter: ipmi - -# Node list. -# Mandatory properties are id and role. -# The MAC address of the PXE boot interface for Fuel is not -# mandatory to be defined. -# All other properties are adapter specific. - -nodes: -- id: 1 - pxeMac: 14:58:D0:54:7A:28 - ipmiIp: 10.118.32.205 - ipmiUser: username - ipmiPass: password -- id: 2 - pxeMac: 14:58:D0:55:E2:E0 - ipmiIp: 10.118.32.202 - ipmiUser: username - ipmiPass: password -# Adding the Fuel node as node id 3 which may not be correct - please -# adjust as needed. -- id: 3 - libvirtName: vFuel - libvirtTemplate: vFuel - isFuel: yes - username: root - password: r00tme - -# Deployment power on strategy -# all: Turn on all nodes at once. There will be no correlation -# between the DHA and DEA node numbering. MAC addresses -# will be used to select the node roles though. -# sequence: Turn on the nodes in sequence starting with the lowest order -# node and wait for the node to be detected by Fuel. Not until -# the node has been detected and assigned a role will the next -# node be turned on. -powerOnStrategy: sequence - -# If fuelCustomInstall is set to true, Fuel is assumed to be installed by -# calling the DHA adapter function "dha_fuelCustomInstall()" with two -# arguments: node ID and the ISO file name to deploy. The custom install -# function is then to handle all necessary logic to boot the Fuel master -# from the ISO and then return. 
-# Allowed values: true, false -fuelCustomInstall: true - diff --git a/fuel/deploy/baremetal/vm/vFuel b/fuel/deploy/baremetal/vm/vFuel deleted file mode 100644 index 1b4f4eb..0000000 --- a/fuel/deploy/baremetal/vm/vFuel +++ /dev/null @@ -1,87 +0,0 @@ - - vFuel - 8290304 - 8290304 - 2 - - /machine - - - hvm - - - - - - - - - - - SandyBridge - - - - - - - destroy - restart - restart - - - - - - /usr/bin/kvm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/fuel/deploy/baremetal/vms/fuel.xml b/fuel/deploy/baremetal/vms/fuel.xml new file mode 100644 index 0000000..9f1eeac --- /dev/null +++ b/fuel/deploy/baremetal/vms/fuel.xml @@ -0,0 +1,87 @@ + + fuel + 8290304 + 8290304 + 2 + + /machine + + + hvm + + + + + + + + + + + SandyBridge + + + + + + + destroy + restart + restart + + + + + + /usr/bin/kvm + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fuel/deploy/baremetal/vms/fuel_lf.xml b/fuel/deploy/baremetal/vms/fuel_lf.xml new file mode 100644 index 0000000..2dd9738 --- /dev/null +++ b/fuel/deploy/baremetal/vms/fuel_lf.xml @@ -0,0 +1,93 @@ + + vFuel + 8290304 + 8290304 + 4 + + /machine + + + hvm + + + + + + + + + + + SandyBridge + + + + + + + destroy + restart + restart + + + + + + /usr/libexec/qemu-kvm + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + system_u:object_r:svirt_image_t:s0:c52,c932 + + \ No newline at end of file diff --git a/fuel/deploy/cloud/configure_nodes.py b/fuel/deploy/cloud/configure_nodes.py index 4d1315a..a2f2a10 100644 --- a/fuel/deploy/cloud/configure_nodes.py +++ b/fuel/deploy/cloud/configure_nodes.py @@ -26,7 +26,7 @@ class ConfigureNodes(object): log('Configure nodes') for node_id, roles_blade in self.node_id_roles_dict.iteritems(): exec_cmd('fuel node set --node-id %s --role %s --env %s' - % (node_id, ','.join(roles_blade[0]), self.env_id)) + % (node_id, roles_blade[0], self.env_id)) self.download_deployment_config() for node_id, roles_blade in self.node_id_roles_dict.iteritems(): @@ -37,8 +37,7 @@ class ConfigureNodes(object): self.upload_deployment_config() def modify_node_network_schemes(self, node_id, roles_blade): - log('Modify node network transformations in environment %s' - % self.env_id) + log('Modify network transformations for node %s' % node_id) type = self.dea.get_node_property(roles_blade[1], 'transformations') transformations = self.dea.get_transformations(type) @@ -53,7 +52,6 @@ class ConfigureNodes(object): with io.open(node_file, 'w') as stream: yaml.dump(node, stream, default_flow_style=False) - def download_deployment_config(self): log('Download deployment config for environment %s' % self.env_id) exec_cmd('fuel deployment --env %s --default --dir %s' diff --git a/fuel/deploy/cloud/deploy.py b/fuel/deploy/cloud/deploy.py index c8714f8..c423834 100644 --- a/fuel/deploy/cloud/deploy.py +++ b/fuel/deploy/cloud/deploy.py @@ -19,6 +19,8 @@ parse = common.parse err = common.err check_file_exists = common.check_file_exists log = common.log +commafy = common.commafy +ArgParser = common.ArgParser class Deploy(object): @@ -57,8 +59,8 @@ class Deploy(object): log('Deleting node %s' % node[N['id']]) exec_cmd('fuel node --node-id %s --delete-from-db' % node[N['id']]) - exec_cmd('dockerctl shell cobbler cobbler system remove ' - '--name node-%s' % node[N['id']]) + exec_cmd('cobbler system remove --name node-%s' + % node[N['id']], False) def check_previous_installation(self): 
log('Check previous installation') @@ -120,7 +122,7 @@ class Deploy(object): self.node_ids_dict[blade] = node[N['id']] def discovery_waiting_loop(self, discovered_macs): - WAIT_LOOP = 180 + WAIT_LOOP = 320 SLEEP_TIME = 10 all_discovered = False for i in range(WAIT_LOOP): @@ -147,13 +149,8 @@ class Deploy(object): def assign_roles_to_cluster_node_ids(self): self.node_id_roles_dict = {} for blade, node_id in self.node_ids_dict.iteritems(): - role_list = [] - role = self.dea.get_node_role(blade) - if role == 'controller': - role_list.extend(['controller', 'mongo']) - elif role == 'compute': - role_list.extend(['compute']) - self.node_id_roles_dict[node_id] = (role_list, blade) + roles = commafy(self.dea.get_node_role(blade)) + self.node_id_roles_dict[node_id] = (roles, blade) def configure_environment(self): config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR, @@ -175,25 +172,16 @@ class Deploy(object): self.configure_environment() self.deploy_cloud() -def usage(): - print ''' - Usage: - python deploy.py - - Example: - python deploy.py dea.yaml macs.yaml - ''' - def parse_arguments(): - if len(sys.argv) != 3: - log('Incorrect number of arguments') - usage() - sys.exit(1) - dea_file = sys.argv[-2] - macs_file = sys.argv[-1] - check_file_exists(dea_file) - check_file_exists(macs_file) - return dea_file, macs_file + parser = ArgParser(prog='python %s' % __file__) + parser.add_argument('dea_file', action='store', + help='Deployment Environment Adapter: dea.yaml') + parser.add_argument('macs_file', action='store', + help='Blade MAC addresses: macs.yaml') + args = parser.parse_args() + check_file_exists(args.dea_file) + check_file_exists(args.macs_file) + return (args.dea_file, args.macs_file) def main(): diff --git a/fuel/deploy/cloud/deployment.py b/fuel/deploy/cloud/deployment.py index cf56c36..0054c5b 100644 --- a/fuel/deploy/cloud/deployment.py +++ b/fuel/deploy/cloud/deployment.py @@ -31,7 +31,7 @@ class Deployment(object): % (self.yaml_config_dir, self.env_id) if os.path.exists(deployment_dir): shutil.rmtree(deployment_dir) - exec_cmd('fuel --env %s deployment --default --dir %s' + exec_cmd('fuel deployment --env %s --download --dir %s' % (self.env_id, self.yaml_config_dir)) def upload_deployment_info(self): @@ -75,7 +75,8 @@ class Deployment(object): if env[0][E['status']] == 'operational': ready = True break - elif env[0][E['status']] == 'error': + elif (env[0][E['status']] == 'error' + or env[0][E['status']] == 'stopped'): break else: time.sleep(SLEEP_TIME) @@ -102,10 +103,9 @@ class Deployment(object): def health_check(self): log('Now running sanity and smoke health checks') - exec_cmd('fuel health --env %s --check sanity,smoke --force' - % self.env_id) - log('Health checks passed !') - + log(exec_cmd('fuel health --env %s --check sanity,smoke --force' + % self.env_id)) + def deploy(self): self.config_opnfv() self.run_deploy() diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py index 6dbda67..dc12637 100644 --- a/fuel/deploy/common.py +++ b/fuel/deploy/common.py @@ -2,6 +2,7 @@ import subprocess import sys import os import logging +import argparse N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, 'roles': 6, 'pending_roles': 7, 'online': 8} @@ -73,6 +74,19 @@ def check_dir_exists(dir_path): if not os.path.isdir(dir_path): err('ERROR: Directory %s not found\n' % dir_path) +def create_dir_if_not_exists(dir_path): + if not os.path.isdir(dir_path): + log('Creating directory %s' % dir_path) + os.makedirs(dir_path) + +def commafy(comma_separated_list): + l = 
[c.strip() for c in comma_separated_list.split(',')] + return ','.join(l) + +def delete_file(file): + if os.path.exists(file): + os.remove(file) + def check_if_root(): r = exec_cmd('whoami') if r != 'root': @@ -80,3 +94,10 @@ def check_if_root(): def log(message): LOG.debug('%s\n' % message) + +class ArgParser(argparse.ArgumentParser): + def error(self, message): + sys.stderr.write('ERROR: %s\n' % message) + self.print_help() + sys.exit(2) + diff --git a/fuel/deploy/dea.py b/fuel/deploy/dea.py index 8066b6a..61ebea3 100644 --- a/fuel/deploy/dea.py +++ b/fuel/deploy/dea.py @@ -48,6 +48,8 @@ class DeploymentEnvironmentAdapter(object): return node[property_name] def get_node_role(self, node_id): + role_list = [] + return self.get_node_property(node_id, 'role') def get_node_ids(self): diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py index 9d1a3d2..3305aed 100644 --- a/fuel/deploy/deploy.py +++ b/fuel/deploy/deploy.py @@ -1,33 +1,38 @@ -import sys import os import shutil import io import re import netaddr +import uuid +import yaml from dea import DeploymentEnvironmentAdapter from dha import DeploymentHardwareAdapter from install_fuel_master import InstallFuelMaster from deploy_env import CloudDeploy +from setup_execution_environment import ExecutionEnvironment import common log = common.log exec_cmd = common.exec_cmd err = common.err check_file_exists = common.check_file_exists +check_dir_exists = common.check_dir_exists +create_dir_if_not_exists = common.create_dir_if_not_exists check_if_root = common.check_if_root +ArgParser = common.ArgParser FUEL_VM = 'fuel' -TMP_DIR = '%s/fueltmp' % os.getenv('HOME') PATCH_DIR = 'fuel_patch' WORK_DIR = 'deploy' +CWD = os.getcwd() class cd: def __init__(self, new_path): self.new_path = os.path.expanduser(new_path) def __enter__(self): - self.saved_path = os.getcwd() + self.saved_path = CWD os.chdir(self.new_path) def __exit__(self, etype, value, traceback): @@ -36,8 +41,11 @@ class cd: class AutoDeploy(object): - def __init__(self, without_fuel, iso_file, dea_file, dha_file): + def __init__(self, without_fuel, storage_dir, pxe_bridge, iso_file, + dea_file, dha_file): self.without_fuel = without_fuel + self.storage_dir = storage_dir + self.pxe_bridge = pxe_bridge self.iso_file = iso_file self.dea_file = dea_file self.dha_file = dha_file @@ -45,22 +53,8 @@ class AutoDeploy(object): self.dha = DeploymentHardwareAdapter(dha_file) self.fuel_conf = {} self.fuel_node_id = self.dha.get_fuel_node_id() - self.fuel_custom = self.dha.use_fuel_custom_install() self.fuel_username, self.fuel_password = self.dha.get_fuel_access() - - def setup_dir(self, dir): - self.cleanup_dir(dir) - os.makedirs(dir) - - def cleanup_dir(self, dir): - if os.path.isdir(dir): - shutil.rmtree(dir) - - def power_off_blades(self): - node_ids = self.dha.get_all_node_ids() - node_ids = list(set(node_ids) - set([self.fuel_node_id])) - for node_id in node_ids: - self.dha.node_power_off(node_id) + self.tmp_dir = None def modify_ip(self, ip_addr, index, val): ip_str = str(netaddr.IPAddress(ip_addr)) @@ -77,11 +71,9 @@ class AutoDeploy(object): self.fuel_conf['showmenu'] = 'yes' def install_fuel_master(self): - if self.without_fuel: - log('Not Installing Fuel Master') - return log('Install Fuel Master') - new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file)) + new_iso = '%s/deploy-%s' \ + % (self.tmp_dir, os.path.basename(self.iso_file)) self.patch_iso(new_iso) self.iso_file = new_iso self.install_iso() @@ -91,23 +83,18 @@ class AutoDeploy(object): self.fuel_conf['ip'], 
self.fuel_username, self.fuel_password, self.fuel_node_id, self.iso_file, WORK_DIR) - if self.fuel_custom: - log('Custom Fuel install') - fuel.custom_install() - else: - log('Ordinary Fuel install') - fuel.install() + fuel.install() def patch_iso(self, new_iso): - tmp_orig_dir = '%s/origiso' % TMP_DIR - tmp_new_dir = '%s/newiso' % TMP_DIR + tmp_orig_dir = '%s/origiso' % self.tmp_dir + tmp_new_dir = '%s/newiso' % self.tmp_dir self.copy(tmp_orig_dir, tmp_new_dir) self.patch(tmp_new_dir, new_iso) def copy(self, tmp_orig_dir, tmp_new_dir): log('Copying...') - self.setup_dir(tmp_orig_dir) - self.setup_dir(tmp_new_dir) + os.makedirs(tmp_orig_dir) + os.makedirs(tmp_new_dir) exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir)) with cd(tmp_orig_dir): exec_cmd('find . | cpio -pd %s' % tmp_new_dir) @@ -118,7 +105,7 @@ class AutoDeploy(object): def patch(self, tmp_new_dir, new_iso): log('Patching...') - patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR) + patch_dir = '%s/%s' % (CWD, PATCH_DIR) ks_path = '%s/ks.cfg.patch' % patch_dir with cd(tmp_new_dir): @@ -153,46 +140,81 @@ class AutoDeploy(object): self.fuel_password, self.dea_file, WORK_DIR) dep.deploy() + def setup_execution_environment(self): + exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge, + self.dha_file, self.dea) + exec_env.setup_environment() + + def create_tmp_dir(self): + self.tmp_dir = '%s/fueltmp-%s' % (CWD, str(uuid.uuid1())) + os.makedirs(self.tmp_dir) + def deploy(self): check_if_root() - self.setup_dir(TMP_DIR) self.collect_fuel_info() - self.power_off_blades() - self.install_fuel_master() - self.cleanup_dir(TMP_DIR) + if not self.without_fuel: + self.setup_execution_environment() + self.create_tmp_dir() + self.install_fuel_master() + shutil.rmtree(self.tmp_dir) self.deploy_env() -def usage(): - print ''' - Usage: - python deploy.py [-nf] - - Optional arguments: - -nf Do not install Fuel master - ''' +def check_bridge(pxe_bridge, dha_path): + with io.open(dha_path) as yaml_file: + dha_struct = yaml.load(yaml_file) + if dha_struct['adapter'] != 'libvirt': + log('Using Linux Bridge %s for booting up the Fuel Master VM' + % pxe_bridge) + r = exec_cmd('ip link show %s' % pxe_bridge) + if pxe_bridge in r and 'state UP' not in r: + err('Linux Bridge {0} is not Active, ' + 'bring it UP first: [ip link set dev {0} up]' % pxe_bridge) def parse_arguments(): - if (len(sys.argv) < 4 or len(sys.argv) > 5 - or (len(sys.argv) == 5 and sys.argv[1] != '-nf')): - log('Incorrect number of arguments') - usage() - sys.exit(1) - without_fuel = False - if len(sys.argv) == 5 and sys.argv[1] == '-nf': - without_fuel = True - iso_file = sys.argv[-3] - dea_file = sys.argv[-2] - dha_file = sys.argv[-1] - check_file_exists(iso_file) - check_file_exists(dea_file) - check_file_exists(dha_file) - return (without_fuel, iso_file, dea_file, dha_file) + parser = ArgParser(prog='python %s' % __file__) + parser.add_argument('-nf', dest='without_fuel', action='store_true', + default=False, + help='Do not install Fuel Master (and Node VMs when ' + 'using libvirt)') + parser.add_argument('iso_file', nargs='?', action='store', + default='%s/OPNFV.iso' % CWD, + help='ISO File [default: OPNFV.iso]') + parser.add_argument('dea_file', action='store', + help='Deployment Environment Adapter: dea.yaml') + parser.add_argument('dha_file', action='store', + help='Deployment Hardware Adapter: dha.yaml') + parser.add_argument('storage_dir', nargs='?', action='store', + default='%s/images' % CWD, + help='Storage Directory [default: images]') + 
parser.add_argument('pxe_bridge', nargs='?', action='store', + default='pxebr', + help='Linux Bridge for booting up the Fuel Master VM ' + '[default: pxebr]') + + args = parser.parse_args() + + check_file_exists(args.dea_file) + check_file_exists(args.dha_file) + + if not args.without_fuel: + log('Using OPNFV ISO file: %s' % args.iso_file) + check_file_exists(args.iso_file) + log('Using image directory: %s' % args.storage_dir) + create_dir_if_not_exists(args.storage_dir) + log('Using bridge %s to boot up Fuel Master VM on it' + % args.pxe_bridge) + check_bridge(args.pxe_bridge, args.dha_file) + + return (args.without_fuel, args.storage_dir, args.pxe_bridge, + args.iso_file, args.dea_file, args.dha_file) -def main(): - without_fuel, iso_file, dea_file, dha_file = parse_arguments() +def main(): + without_fuel, storage_dir, pxe_bridge, iso_file, dea_file, dha_file = \ + parse_arguments() - d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file) + d = AutoDeploy(without_fuel, storage_dir, pxe_bridge, iso_file, + dea_file, dha_file) d.deploy() if __name__ == '__main__': diff --git a/fuel/deploy/deploy_env.py b/fuel/deploy/deploy_env.py index 9bc8fbb..48aec18 100644 --- a/fuel/deploy/deploy_env.py +++ b/fuel/deploy/deploy_env.py @@ -53,7 +53,7 @@ class CloudDeploy(object): def set_boot_order(self, boot_order_list): for node_id in self.node_ids: - self.dha.node_set_boot_order(node_id, boot_order_list) + self.dha.node_set_boot_order(node_id, boot_order_list[:]) def get_mac_addresses(self): macs_per_node = {} @@ -67,8 +67,8 @@ class CloudDeploy(object): deploy_app = '%s/%s' % (self.work_dir, deploy_app) dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file)) macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file)) - with self.ssh: - self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file)) + with self.ssh as s: + s.run('python %s %s %s' % (deploy_app, dea_file, macs_file)) def deploy(self): diff --git a/fuel/deploy/dha_adapters/hardware_adapter.py b/fuel/deploy/dha_adapters/hardware_adapter.py index 884e9ce..a8d0121 100644 --- a/fuel/deploy/dha_adapters/hardware_adapter.py +++ b/fuel/deploy/dha_adapters/hardware_adapter.py @@ -34,18 +34,15 @@ class HardwareAdapter(object): node_ids.sort() return node_ids - def use_fuel_custom_install(self): - return self.dha_struct['fuelCustomInstall'] - def get_node_property(self, node_id, property_name): for node in self.dha_struct['nodes']: if node['id'] == node_id and property_name in node: return node[property_name] - def node_can_zero_mbr(self, node_id): - return self.get_node_property(node_id, 'nodeCanZeroMBR') - def get_fuel_access(self): for node in self.dha_struct['nodes']: if 'isFuel' in node and node['isFuel']: return node['username'], node['password'] + + def get_disks(self): + return self.dha_struct['disks'] \ No newline at end of file diff --git a/fuel/deploy/dha_adapters/hp_adapter.py b/fuel/deploy/dha_adapters/hp_adapter.py index 8fc38ad..8cfec34 100644 --- a/fuel/deploy/dha_adapters/hp_adapter.py +++ b/fuel/deploy/dha_adapters/hp_adapter.py @@ -19,7 +19,7 @@ class HpAdapter(IpmiAdapter): log('Set boot order %s on Node %s' % (boot_order_list, node_id)) ip, username, password = self.get_access_info(node_id) ssh = SSHClient(ip, username, password) - for order, dev in enumerate(boot_order_list): - with ssh as s: + with ssh as s: + for order, dev in enumerate(boot_order_list): s.exec_cmd('set %s/%s bootorder=%s' % (ROOT, DEV[dev], order+1)) diff --git a/fuel/deploy/dha_adapters/ipmi_adapter.py 
b/fuel/deploy/dha_adapters/ipmi_adapter.py index d97fd2d..1bef898 100644 --- a/fuel/deploy/dha_adapters/ipmi_adapter.py +++ b/fuel/deploy/dha_adapters/ipmi_adapter.py @@ -1,8 +1,10 @@ import common +import time from hardware_adapter import HardwareAdapter log = common.log exec_cmd = common.exec_cmd +err = common.err class IpmiAdapter(HardwareAdapter): @@ -27,28 +29,72 @@ class IpmiAdapter(HardwareAdapter): return mac_list def node_power_on(self, node_id): + WAIT_LOOP = 200 + SLEEP_TIME = 3 log('Power ON Node %s' % node_id) cmd_prefix = self.ipmi_cmd(node_id) state = exec_cmd('%s chassis power status' % cmd_prefix) if state == 'Chassis Power is off': exec_cmd('%s chassis power on' % cmd_prefix) + done = False + for i in range(WAIT_LOOP): + state, _ = exec_cmd('%s chassis power status' % cmd_prefix, + False) + if state == 'Chassis Power is on': + done = True + break + else: + time.sleep(SLEEP_TIME) + if not done: + err('Could Not Power ON Node %s' % node_id) def node_power_off(self, node_id): + WAIT_LOOP = 200 + SLEEP_TIME = 3 log('Power OFF Node %s' % node_id) cmd_prefix = self.ipmi_cmd(node_id) state = exec_cmd('%s chassis power status' % cmd_prefix) if state == 'Chassis Power is on': + done = False exec_cmd('%s chassis power off' % cmd_prefix) + for i in range(WAIT_LOOP): + state, _ = exec_cmd('%s chassis power status' % cmd_prefix, + False) + if state == 'Chassis Power is off': + done = True + break + else: + time.sleep(SLEEP_TIME) + if not done: + err('Could Not Power OFF Node %s' % node_id) def node_reset(self, node_id): - log('Reset Node %s' % node_id) + WAIT_LOOP = 600 + log('RESET Node %s' % node_id) cmd_prefix = self.ipmi_cmd(node_id) state = exec_cmd('%s chassis power status' % cmd_prefix) if state == 'Chassis Power is on': + was_shut_off = False + done = False exec_cmd('%s chassis power reset' % cmd_prefix) + for i in range(WAIT_LOOP): + state, _ = exec_cmd('%s chassis power status' % cmd_prefix, + False) + if state == 'Chassis Power is off': + was_shut_off = True + elif state == 'Chassis Power is on' and was_shut_off: + done = True + break + time.sleep(1) + if not done: + err('Could Not RESET Node %s' % node_id) + else: + err('Cannot RESET Node %s because it\'s not Active, state: %s' + % (node_id, state)) def node_set_boot_order(self, node_id, boot_order_list): log('Set boot order %s on Node %s' % (boot_order_list, node_id)) + boot_order_list.reverse() cmd_prefix = self.ipmi_cmd(node_id) for dev in boot_order_list: if dev == 'pxe': @@ -58,4 +104,4 @@ class IpmiAdapter(HardwareAdapter): exec_cmd('%s chassis bootdev cdrom' % cmd_prefix) elif dev == 'disk': exec_cmd('%s chassis bootdev disk options=persistent' - % cmd_prefix) + % cmd_prefix) \ No newline at end of file diff --git a/fuel/deploy/dha_adapters/libvirt_adapter.py b/fuel/deploy/dha_adapters/libvirt_adapter.py index dde4946..1eca548 100644 --- a/fuel/deploy/dha_adapters/libvirt_adapter.py +++ b/fuel/deploy/dha_adapters/libvirt_adapter.py @@ -96,12 +96,6 @@ class LibvirtAdapter(HardwareAdapter): exec_cmd('virsh change-media %s --insert %s %s' % (vm_name, device, iso_file)) - def get_disks(self): - return self.dha_struct['disks'] - - def get_node_role(self, node_id): - return self.get_node_property(node_id, 'role') - def get_node_pxe_mac(self, node_id): mac_list = [] vm_name = self.get_node_property(node_id, 'libvirtName') diff --git a/fuel/deploy/environments/__init__.py b/fuel/deploy/environments/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/environments/__init__.py @@ -0,0 +1 @@ 
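The retry loops added to ipmi_adapter.py above all follow the same poll-until-state pattern. A condensed sketch of that pattern, assuming (as the hunks above suggest) that exec_cmd returns an (output, returncode) tuple when its second argument is False; the helper name and its injected exec_cmd parameter are illustrative, not part of the patch:

    import time

    def wait_for_chassis_state(exec_cmd, cmd_prefix, wanted_state,
                               wait_loop=200, sleep_time=3):
        # Poll 'ipmitool ... chassis power status' until the chassis
        # reports wanted_state; give up after wait_loop attempts.
        for _ in range(wait_loop):
            state, _rc = exec_cmd('%s chassis power status' % cmd_prefix,
                                  False)
            if state == wanted_state:
                return True
            time.sleep(sleep_time)
        return False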
+__author__ = 'eszicse' diff --git a/fuel/deploy/environments/execution_environment.py b/fuel/deploy/environments/execution_environment.py new file mode 100644 index 0000000..4f612a6 --- /dev/null +++ b/fuel/deploy/environments/execution_environment.py @@ -0,0 +1,67 @@ +from lxml import etree + +import common +from dha_adapters.libvirt_adapter import LibvirtAdapter + +exec_cmd = common.exec_cmd +err = common.err +log = common.log +check_dir_exists = common.check_dir_exists +check_file_exists = common.check_file_exists +check_if_root = common.check_if_root + +class ExecutionEnvironment(object): + + def __init__(self, storage_dir, dha_file, root_dir): + self.storage_dir = storage_dir + self.dha = LibvirtAdapter(dha_file) + self.root_dir = root_dir + self.parser = etree.XMLParser(remove_blank_text=True) + self.fuel_node_id = self.dha.get_fuel_node_id() + + def delete_vm(self, node_id): + vm_name = self.dha.get_node_property(node_id, 'libvirtName') + r, c = exec_cmd('virsh dumpxml %s' % vm_name, False) + if c: + return + self.undefine_vm_delete_disk(r, vm_name) + + def undefine_vm_delete_disk(self, printout, vm_name): + disk_files = [] + xml_dump = etree.fromstring(printout, self.parser) + disks = xml_dump.xpath('/domain/devices/disk') + for disk in disks: + sources = disk.xpath('source') + for source in sources: + source_file = source.get('file') + if source_file: + disk_files.append(source_file) + log('Deleting VM %s with disks %s' % (vm_name, disk_files)) + exec_cmd('virsh destroy %s' % vm_name, False) + exec_cmd('virsh undefine %s' % vm_name, False) + for file in disk_files: + exec_cmd('rm -f %s' % file) + + def define_vm(self, vm_name, temp_vm_file, disk_path): + log('Creating VM %s with disks %s' % (vm_name, disk_path)) + with open(temp_vm_file) as f: + vm_xml = etree.parse(f) + names = vm_xml.xpath('/domain/name') + for name in names: + name.text = vm_name + uuids = vm_xml.xpath('/domain/uuid') + for uuid in uuids: + uuid.getparent().remove(uuid) + disks = vm_xml.xpath('/domain/devices/disk') + for disk in disks: + if (disk.get('type') == 'file' + and disk.get('device') == 'disk'): + sources = disk.xpath('source') + for source in sources: + disk.remove(source) + source = etree.Element('source') + source.set('file', disk_path) + disk.append(source) + with open(temp_vm_file, 'w') as f: + vm_xml.write(f, pretty_print=True, xml_declaration=True) + exec_cmd('virsh define %s' % temp_vm_file) \ No newline at end of file diff --git a/fuel/deploy/environments/libvirt_environment.py b/fuel/deploy/environments/libvirt_environment.py new file mode 100644 index 0000000..e156fd2 --- /dev/null +++ b/fuel/deploy/environments/libvirt_environment.py @@ -0,0 +1,93 @@ +from lxml import etree +import glob + +import common +from execution_environment import ExecutionEnvironment + +exec_cmd = common.exec_cmd +err = common.err +log = common.log +check_dir_exists = common.check_dir_exists +check_file_exists = common.check_file_exists +check_if_root = common.check_if_root + +NET_DIR = 'libvirt/networks' + +class LibvirtEnvironment(ExecutionEnvironment): + + def __init__(self, storage_dir, dha_file, dea, root_dir): + super(LibvirtEnvironment, self).__init__( + storage_dir, dha_file, root_dir) + self.dea = dea + self.network_dir = '%s/%s' % (self.root_dir, NET_DIR) + self.node_ids = self.dha.get_all_node_ids() + self.net_names = self.collect_net_names() + + def create_storage(self, node_id, disk_path, disk_sizes): + if node_id == self.fuel_node_id: + disk_size = disk_sizes['fuel'] + else: + roles = 
self.dea.get_node_role(node_id) + role = 'controller' if 'controller' in roles else 'compute' + disk_size = disk_sizes[role] + exec_cmd('fallocate -l %s %s' % (disk_size, disk_path)) + + def create_vms(self): + temp_dir = exec_cmd('mktemp -d') + disk_sizes = self.dha.get_disks() + for node_id in self.node_ids: + vm_name = self.dha.get_node_property(node_id, 'libvirtName') + vm_template = '%s/%s' % (self.root_dir, + self.dha.get_node_property( + node_id, 'libvirtTemplate')) + check_file_exists(vm_template) + disk_path = '%s/%s.raw' % (self.storage_dir, vm_name) + self.create_storage(node_id, disk_path, disk_sizes) + temp_vm_file = '%s/%s' % (temp_dir, vm_name) + exec_cmd('cp %s %s' % (vm_template, temp_vm_file)) + self.define_vm(vm_name, temp_vm_file, disk_path) + exec_cmd('rm -fr %s' % temp_dir) + + def create_networks(self): + for net_file in glob.glob('%s/*' % self.network_dir): + exec_cmd('virsh net-define %s' % net_file) + for net in self.net_names: + log('Creating network %s' % net) + exec_cmd('virsh net-autostart %s' % net) + exec_cmd('virsh net-start %s' % net) + + def delete_networks(self): + for net in self.net_names: + log('Deleting network %s' % net) + exec_cmd('virsh net-destroy %s' % net, False) + exec_cmd('virsh net-undefine %s' % net, False) + + def get_net_name(self, net_file): + with open(net_file) as f: + net_xml = etree.parse(f) + name_list = net_xml.xpath('/network/name') + for name in name_list: + net_name = name.text + return net_name + + def collect_net_names(self): + net_list = [] + for net_file in glob.glob('%s/*' % self.network_dir): + name = self.get_net_name(net_file) + net_list.append(name) + return net_list + + def delete_vms(self): + for node_id in self.node_ids: + self.delete_vm(node_id) + + def setup_environment(self): + check_if_root() + check_dir_exists(self.network_dir) + self.cleanup_environment() + self.create_vms() + self.create_networks() + + def cleanup_environment(self): + self.delete_vms() + self.delete_networks() diff --git a/fuel/deploy/environments/virtual_fuel.py b/fuel/deploy/environments/virtual_fuel.py new file mode 100644 index 0000000..1f939f0 --- /dev/null +++ b/fuel/deploy/environments/virtual_fuel.py @@ -0,0 +1,60 @@ +from lxml import etree + +import common +from execution_environment import ExecutionEnvironment + +exec_cmd = common.exec_cmd +log = common.log +check_file_exists = common.check_file_exists +check_if_root = common.check_if_root + +class VirtualFuel(ExecutionEnvironment): + + def __init__(self, storage_dir, pxe_bridge, dha_file, root_dir): + super(VirtualFuel, self).__init__( + storage_dir, dha_file, root_dir) + self.pxe_bridge = pxe_bridge + + def set_vm_nic(self, temp_vm_file): + with open(temp_vm_file) as f: + vm_xml = etree.parse(f) + interfaces = vm_xml.xpath('/domain/devices/interface') + for interface in interfaces: + interface.getparent().remove(interface) + interface = etree.Element('interface') + interface.set('type', 'bridge') + source = etree.SubElement(interface, 'source') + source.set('bridge', self.pxe_bridge) + model = etree.SubElement(interface, 'model') + model.set('type', 'virtio') + devices = vm_xml.xpath('/domain/devices') + if devices: + device = devices[0] + device.append(interface) + with open(temp_vm_file, 'w') as f: + vm_xml.write(f, pretty_print=True, xml_declaration=True) + + def create_vm(self): + temp_dir = exec_cmd('mktemp -d') + vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName') + vm_template = '%s/%s' % (self.root_dir, + self.dha.get_node_property( + self.fuel_node_id, 
'libvirtTemplate')) + check_file_exists(vm_template) + disk_path = '%s/%s.raw' % (self.storage_dir, vm_name) + disk_sizes = self.dha.get_disks() + disk_size = disk_sizes['fuel'] + exec_cmd('fallocate -l %s %s' % (disk_size, disk_path)) + temp_vm_file = '%s/%s' % (temp_dir, vm_name) + exec_cmd('cp %s %s' % (vm_template, temp_vm_file)) + self.set_vm_nic(temp_vm_file) + self.define_vm(vm_name, temp_vm_file, disk_path) + exec_cmd('rm -fr %s' % temp_dir) + + def setup_environment(self): + check_if_root() + self.cleanup_environment() + self.create_vm() + + def cleanup_environment(self): + self.delete_vm(self.fuel_node_id) diff --git a/fuel/deploy/install-ubuntu-packages.sh b/fuel/deploy/install-ubuntu-packages.sh deleted file mode 100755 index 1ebd7c0..0000000 --- a/fuel/deploy/install-ubuntu-packages.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -# Tools for installation on the libvirt server/base host -# -apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \ - sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \ - python-paramiko python-lxml python-pip -pip install scp -restart libvirt-bin \ No newline at end of file diff --git a/fuel/deploy/install_fuel_master.py b/fuel/deploy/install_fuel_master.py index bb8e7e1..ea24ff0 100644 --- a/fuel/deploy/install_fuel_master.py +++ b/fuel/deploy/install_fuel_master.py @@ -32,15 +32,6 @@ class InstallFuelMaster(object): self.dha.node_power_off(self.fuel_node_id) - self.zero_mbr_set_boot_order() - - self.proceed_with_installation() - - def custom_install(self): - log('Start Custom Fuel Installation') - - self.dha.node_power_off(self.fuel_node_id) - log('Zero the MBR') self.dha.node_zero_mbr(self.fuel_node_id) @@ -68,7 +59,7 @@ class InstallFuelMaster(object): log('Let the Fuel deployment continue') log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid) - self.ssh_exec_cmd('kill %s' % fuel_menu_pid) + self.ssh_exec_cmd('kill %s' % fuel_menu_pid, False) log('Wait until installation complete') self.wait_until_installation_completed() @@ -81,18 +72,6 @@ class InstallFuelMaster(object): log('Fuel Master installed successfully !') - def zero_mbr_set_boot_order(self): - if self.dha.node_can_zero_mbr(self.fuel_node_id): - log('Fuel Node %s capable of zeroing MBR so doing that...' - % self.fuel_node_id) - self.dha.node_zero_mbr(self.fuel_node_id) - self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso']) - elif self.dha.node_can_set_boot_order_live(self.fuel_node_id): - log('Node %s can change ISO boot order live' % self.fuel_node_id) - self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk']) - else: - err('No way to install Fuel node') - def wait_for_node_up(self): WAIT_LOOP = 60 SLEEP_TIME = 10 @@ -103,8 +82,8 @@ class InstallFuelMaster(object): success = True break except Exception as e: - log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... ' - 'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME)) + log('Trying to SSH into Fuel VM %s ... 
sleeping %s seconds' + % (self.fuel_ip, SLEEP_TIME)) time.sleep(SLEEP_TIME) finally: self.ssh.close() @@ -138,9 +117,9 @@ class InstallFuelMaster(object): break return fuel_menu_pid - def ssh_exec_cmd(self, cmd): + def ssh_exec_cmd(self, cmd, check=True): with self.ssh: - ret = self.ssh.exec_cmd(cmd) + ret = self.ssh.exec_cmd(cmd, check=check) return ret def inject_own_astute_yaml(self): @@ -159,7 +138,7 @@ class InstallFuelMaster(object): self.work_dir, os.path.basename(self.dea_file))) def wait_until_installation_completed(self): - WAIT_LOOP = 180 + WAIT_LOOP = 320 SLEEP_TIME = 10 CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN diff --git a/fuel/deploy/libvirt/conf/ha/dea.yaml b/fuel/deploy/libvirt/conf/ha/dea.yaml new file mode 100644 index 0000000..907bf90 --- /dev/null +++ b/fuel/deploy/libvirt/conf/ha/dea.yaml @@ -0,0 +1,976 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Sat Apr 25 16:26:22 UTC 2015 +comment: Small libvirt setup +environment_name: opnfv_virt +environment_mode: ha +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 3 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 6 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.20.0.2 + netmask: 255.255.255.0 + dhcp_pool_start: 10.20.0.3 + dhcp_pool_end: 10.20.0.254 + DNS_UPSTREAM: 8.8.8.8 + DNS_DOMAIN: domain.tld + DNS_SEARCH: domain.tld + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv_virt + NTP1: 0.pool.ntp.org + NTP2: 1.pool.ntp.org + NTP3: 2.pool.ntp.org +interfaces: + interfaces_1: + eth0: + - fuelweb_admin + - management + eth1: + - storage + eth2: + - private + eth3: + - public +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 + - action: add-patch + bridges: + - br-eth3 + - br-ex + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + 
name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.16.0.130 + - 172.16.0.254 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 1000 + - 1030 + networks: + - cidr: 172.16.0.0/24 + gateway: 172.16.0.1 + ip_ranges: + - - 172.16.0.2 + - 172.16.0.126 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.1 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 101 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.1 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 102 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.20.0.0/24 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + 
type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. 
OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. + type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. 
+ label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. 
+ label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. + label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. 
+ label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. + label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/libvirt/conf/ha/dha.yaml b/fuel/deploy/libvirt/conf/ha/dha.yaml new file mode 100644 index 0000000..d862f64 --- /dev/null +++ b/fuel/deploy/libvirt/conf/ha/dha.yaml @@ -0,0 +1,42 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Sat Apr 25 16:26:22 UTC 2015 +comment: Small libvirt setup + +# Adapter to use for this definition +adapter: libvirt + +# Node list. +# Mandatory property is id, all other properties are adapter specific. 
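+# For the libvirt adapter, the per-node properties consumed by the deploy
+# scripts above are, for example:
+#   libvirtName:       name of the predefined libvirt domain (its virsh name)
+#   libvirtTemplate:   domain XML template used to (re)define the VM
+#   isFuel:            marks the node that hosts the Fuel master
+#   username/password: credentials for reaching the Fuel master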
+ +nodes: +- id: 1 + libvirtName: controller1 + libvirtTemplate: libvirt/vms/controller.xml +- id: 2 + libvirtName: compute1 + libvirtTemplate: libvirt/vms/compute.xml +- id: 3 + libvirtName: compute2 + libvirtTemplate: libvirt/vms/compute.xml +- id: 4 + libvirtName: compute3 + libvirtTemplate: libvirt/vms/compute.xml +- id: 5 + libvirtName: compute4 + libvirtTemplate: libvirt/vms/compute.xml +- id: 6 + libvirtName: compute5 + libvirtTemplate: libvirt/vms/compute.xml +- id: 7 + libvirtName: fuel-master + libvirtTemplate: libvirt/vms/fuel.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G + controller: 30G + compute: 30G diff --git a/fuel/deploy/libvirt/conf/multinode/dea.yaml b/fuel/deploy/libvirt/conf/multinode/dea.yaml new file mode 100644 index 0000000..dfd8382 --- /dev/null +++ b/fuel/deploy/libvirt/conf/multinode/dea.yaml @@ -0,0 +1,976 @@ +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: Sat Apr 25 16:26:22 UTC 2015 +comment: Small libvirt setup +environment_name: opnfv_virt +environment_mode: multinode +wanted_release: Juno on Ubuntu 12.04.4 +nodes: +- id: 1 + interfaces: interfaces_1 + transformations: transformations_1 + role: ceph-osd,controller +- id: 2 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 3 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 4 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 5 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +- id: 6 + interfaces: interfaces_1 + transformations: transformations_2 + role: ceph-osd,compute +fuel: + ADMIN_NETWORK: + ipaddress: 10.20.0.2 + netmask: 255.255.255.0 + dhcp_pool_start: 10.20.0.3 + dhcp_pool_end: 10.20.0.254 + DNS_UPSTREAM: 8.8.8.8 + DNS_DOMAIN: domain.tld + DNS_SEARCH: domain.tld + FUEL_ACCESS: + user: admin + password: admin + HOSTNAME: opnfv_virt + NTP1: 0.pool.ntp.org + NTP2: 1.pool.ntp.org + NTP3: 2.pool.ntp.org +interfaces: + interfaces_1: + eth0: + - fuelweb_admin + - management + eth1: + - storage + eth2: + - private + eth3: + - public +transformations: + transformations_1: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + name: eth3 + - action: add-br + name: br-ex + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 + - action: add-patch + bridges: + - br-eth3 + - br-ex + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-prv + transformations_2: + - action: add-br + name: br-eth0 + - action: add-port + bridge: br-eth0 + name: eth0 + - action: add-br + name: br-eth1 + - action: add-port + bridge: br-eth1 + name: eth1 + - action: add-br + name: br-eth2 + - action: add-port + bridge: br-eth2 + name: eth2 + - action: add-br + name: br-eth3 + - action: add-port + bridge: br-eth3 + 
name: eth3 + - action: add-br + name: br-mgmt + - action: add-br + name: br-storage + - action: add-br + name: br-fw-admin + - action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 + - action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 + - action: add-br + name: br-prv + - action: add-patch + bridges: + - br-eth2 + - br-prv +opnfv: + compute: {} + controller: {} +network: + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.16.0.130 + - 172.16.0.254 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 1000 + - 1030 + networks: + - cidr: 172.16.0.0/24 + gateway: 172.16.0.1 + ip_ranges: + - - 172.16.0.2 + - 172.16.0.126 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.1 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 101 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.1 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 102 + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 10.20.0.0/24 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be 
installed + label: Install Ceilometer + type: checkbox + value: false + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: true + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. 
If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. + type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. 
+ label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. 
+ label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: true + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: true + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. + label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. 
+ label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. + label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: true + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: admin + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 \ No newline at end of file diff --git a/fuel/deploy/libvirt/conf/multinode/dha.yaml b/fuel/deploy/libvirt/conf/multinode/dha.yaml new file mode 100644 index 0000000..5e560bf --- /dev/null +++ b/fuel/deploy/libvirt/conf/multinode/dha.yaml @@ -0,0 +1,42 @@ +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: Sat Apr 25 16:26:22 UTC 2015 +comment: Small libvirt setup + +# Adapter to use for this definition +adapter: libvirt + +# Node list. +# Mandatory property is id, all other properties are adapter specific. 
+ +nodes: +- id: 1 + libvirtName: controller1 + libvirtTemplate: libvirt/vms/controller.xml +- id: 2 + libvirtName: controller2 + libvirtTemplate: libvirt/vms/compute.xml +- id: 3 + libvirtName: controller3 + libvirtTemplate: libvirt/vms/compute.xml +- id: 4 + libvirtName: compute1 + libvirtTemplate: libvirt/vms/compute.xml +- id: 5 + libvirtName: compute2 + libvirtTemplate: libvirt/vms/compute.xml +- id: 6 + libvirtName: compute3 + libvirtTemplate: libvirt/vms/compute.xml +- id: 7 + libvirtName: fuel-master + libvirtTemplate: libvirt/vms/fuel.xml + isFuel: yes + username: root + password: r00tme + +disks: + fuel: 30G + controller: 30G + compute: 30G diff --git a/fuel/deploy/libvirt/dea.yaml b/fuel/deploy/libvirt/dea.yaml deleted file mode 100644 index 802293f..0000000 --- a/fuel/deploy/libvirt/dea.yaml +++ /dev/null @@ -1,976 +0,0 @@ -title: Deployment Environment Adapter (DEA) -# DEA API version supported -version: 1.1 -created: Sat Apr 25 16:26:22 UTC 2015 -comment: Small libvirt setup -environment_name: opnfv59-b -environment_mode: multinode -wanted_release: Juno on Ubuntu 12.04.4 -nodes: -- id: 1 - interfaces: interface1 - transformations: controller1 - role: controller -- id: 2 - interfaces: interface1 - transformations: controller1 - role: controller -- id: 3 - interfaces: interface1 - transformations: controller1 - role: controller -- id: 4 - interfaces: interface1 - transformations: compute1 - role: compute -- id: 5 - interfaces: interface1 - transformations: compute1 - role: compute -- id: 6 - interfaces: interface1 - transformations: compute1 - role: compute -fuel: - ADMIN_NETWORK: - ipaddress: 10.20.0.2 - netmask: 255.255.255.0 - dhcp_pool_start: 10.20.0.3 - dhcp_pool_end: 10.20.0.254 - DNS_UPSTREAM: 8.8.8.8 - DNS_DOMAIN: domain.tld - DNS_SEARCH: domain.tld - FUEL_ACCESS: - user: admin - password: admin - HOSTNAME: opnfv59 - NTP1: 0.pool.ntp.org - NTP2: 1.pool.ntp.org - NTP3: 2.pool.ntp.org -interfaces: - interface1: - eth0: - - fuelweb_admin - - management - eth1: - - storage - eth2: - - private - eth3: - - public -transformations: - controller1: - - action: add-br - name: br-eth0 - - action: add-port - bridge: br-eth0 - name: eth0 - - action: add-br - name: br-eth1 - - action: add-port - bridge: br-eth1 - name: eth1 - - action: add-br - name: br-eth2 - - action: add-port - bridge: br-eth2 - name: eth2 - - action: add-br - name: br-eth3 - - action: add-port - bridge: br-eth3 - name: eth3 - - action: add-br - name: br-ex - - action: add-br - name: br-mgmt - - action: add-br - name: br-storage - - action: add-br - name: br-fw-admin - - action: add-patch - bridges: - - br-eth1 - - br-storage - tags: - - 102 - - 0 - vlan_ids: - - 102 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-mgmt - tags: - - 101 - - 0 - vlan_ids: - - 101 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-fw-admin - trunks: - - 0 - - action: add-patch - bridges: - - br-eth3 - - br-ex - trunks: - - 0 - - action: add-br - name: br-prv - - action: add-patch - bridges: - - br-eth2 - - br-prv - compute1: - - action: add-br - name: br-eth0 - - action: add-port - bridge: br-eth0 - name: eth0 - - action: add-br - name: br-eth1 - - action: add-port - bridge: br-eth1 - name: eth1 - - action: add-br - name: br-eth2 - - action: add-port - bridge: br-eth2 - name: eth2 - - action: add-br - name: br-eth3 - - action: add-port - bridge: br-eth3 - name: eth3 - - action: add-br - name: br-mgmt - - action: add-br - name: br-storage - - action: add-br - name: br-fw-admin - - action: add-patch - bridges: - - 
br-eth1 - - br-storage - tags: - - 102 - - 0 - vlan_ids: - - 102 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-mgmt - tags: - - 101 - - 0 - vlan_ids: - - 101 - - 0 - - action: add-patch - bridges: - - br-eth0 - - br-fw-admin - trunks: - - 0 - - action: add-br - name: br-prv - - action: add-patch - bridges: - - br-eth2 - - br-prv -opnfv: - compute: {} - controller: {} -network: - networking_parameters: - base_mac: fa:16:3e:00:00:00 - dns_nameservers: - - 8.8.4.4 - - 8.8.8.8 - floating_ranges: - - - 172.16.0.130 - - 172.16.0.254 - gre_id_range: - - 2 - - 65535 - internal_cidr: 192.168.111.0/24 - internal_gateway: 192.168.111.1 - net_l23_provider: ovs - segmentation_type: vlan - vlan_range: - - 1000 - - 1030 - networks: - - cidr: 172.16.0.0/24 - gateway: 172.16.0.1 - ip_ranges: - - - 172.16.0.2 - - 172.16.0.126 - meta: - assign_vip: true - cidr: 172.16.0.0/24 - configurable: true - floating_range_var: floating_ranges - ip_range: - - 172.16.0.2 - - 172.16.0.126 - map_priority: 1 - name: public - notation: ip_ranges - render_addr_mask: public - render_type: null - use_gateway: true - vlan_start: null - name: public - vlan_start: null - - cidr: 192.168.0.0/24 - gateway: null - ip_ranges: - - - 192.168.0.1 - - 192.168.0.254 - meta: - assign_vip: true - cidr: 192.168.0.0/24 - configurable: true - map_priority: 2 - name: management - notation: cidr - render_addr_mask: internal - render_type: cidr - use_gateway: false - vlan_start: 101 - name: management - vlan_start: 101 - - cidr: 192.168.1.0/24 - gateway: null - ip_ranges: - - - 192.168.1.1 - - 192.168.1.254 - meta: - assign_vip: false - cidr: 192.168.1.0/24 - configurable: true - map_priority: 2 - name: storage - notation: cidr - render_addr_mask: storage - render_type: cidr - use_gateway: false - vlan_start: 102 - name: storage - vlan_start: 102 - - cidr: null - gateway: null - ip_ranges: [] - meta: - assign_vip: false - configurable: false - map_priority: 2 - name: private - neutron_vlan_range: true - notation: null - render_addr_mask: null - render_type: null - seg_type: vlan - use_gateway: false - vlan_start: null - name: private - vlan_start: null - - cidr: 10.20.0.0/24 - gateway: null - ip_ranges: - - - 10.20.0.3 - - 10.20.0.254 - meta: - assign_vip: false - configurable: false - map_priority: 0 - notation: ip_ranges - render_addr_mask: null - render_type: null - unmovable: true - use_gateway: true - name: fuelweb_admin - vlan_start: null -settings: - editable: - access: - email: - description: Email address for Administrator - label: email - type: text - value: admin@localhost - weight: 40 - metadata: - label: Access - weight: 10 - password: - description: Password for Administrator - label: password - type: password - value: admin - weight: 20 - tenant: - description: Tenant (project) name for Administrator - label: tenant - regex: - error: Invalid tenant name - source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* - type: text - value: admin - weight: 30 - user: - description: Username for Administrator - label: username - regex: - error: Invalid username - source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* - type: text - value: admin - weight: 10 - additional_components: - ceilometer: - description: If selected, Ceilometer component will be installed - label: Install Ceilometer - type: checkbox - value: false - weight: 40 - heat: - description: '' - label: '' - type: hidden - value: true - weight: 30 - 
metadata: - label: Additional Components - weight: 20 - murano: - description: If selected, Murano component will be installed - label: Install Murano - restrictions: - - cluster:net_provider != 'neutron' - type: checkbox - value: false - weight: 20 - sahara: - description: If selected, Sahara component will be installed - label: Install Sahara - type: checkbox - value: false - weight: 10 - common: - auth_key: - description: Public key(s) to include in authorized_keys on deployed nodes - label: Public Key - type: text - value: '' - weight: 70 - auto_assign_floating_ip: - description: If selected, OpenStack will automatically assign a floating IP - to a new instance - label: Auto assign floating IP - restrictions: - - cluster:net_provider == 'neutron' - type: checkbox - value: false - weight: 40 - compute_scheduler_driver: - label: Scheduler driver - type: radio - value: nova.scheduler.filter_scheduler.FilterScheduler - values: - - data: nova.scheduler.filter_scheduler.FilterScheduler - description: Currently the most advanced OpenStack scheduler. See the OpenStack - documentation for details. - label: Filter scheduler - - data: nova.scheduler.simple.SimpleScheduler - description: This is 'naive' scheduler which tries to find the least loaded - host - label: Simple scheduler - weight: 40 - debug: - description: Debug logging mode provides more information, but requires more - disk space. - label: OpenStack debug logging - type: checkbox - value: false - weight: 20 - disable_offload: - description: If set, generic segmentation offload (gso) and generic receive - offload (gro) on physical nics will be disabled. See ethtool man. - label: Disable generic offload on physical nics - restrictions: - - action: hide - condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type - == 'gre' - type: checkbox - value: true - weight: 80 - libvirt_type: - label: Hypervisor type - type: radio - value: kvm - values: - - data: kvm - description: Choose this type of hypervisor if you run OpenStack on hardware - label: KVM - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - - data: qemu - description: Choose this type of hypervisor if you run OpenStack on virtual - hosts. - label: QEMU - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - - data: vcenter - description: Choose this type of hypervisor if you run OpenStack in a vCenter - environment. - label: vCenter - restrictions: - - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider - == 'neutron' - weight: 30 - metadata: - label: Common - weight: 30 - nova_quota: - description: Quotas are used to limit CPU and memory usage for tenants. Enabling - quotas will increase load on the Nova database. - label: Nova quotas - type: checkbox - value: false - weight: 25 - resume_guests_state_on_host_boot: - description: Whether to resume previous guests state when the host reboots. - If enabled, this option causes guests assigned to the host to resume their - previous state. If the guest was running a restart will be attempted when - nova-compute starts. If the guest was not running previously, a restart - will not be attempted. - label: Resume guests state on host boot - type: checkbox - value: true - weight: 60 - use_cow_images: - description: For most cases you will want qcow format. If it's disabled, raw - image format will be used to run VMs. OpenStack with raw format currently - does not support snapshotting. 
- label: Use qcow format for images - type: checkbox - value: true - weight: 50 - corosync: - group: - description: '' - label: Group - type: text - value: 226.94.1.1 - weight: 10 - metadata: - label: Corosync - restrictions: - - action: hide - condition: 'true' - weight: 50 - port: - description: '' - label: Port - type: text - value: '12000' - weight: 20 - verified: - description: Set True only if multicast is configured correctly on router. - label: Need to pass network verification. - type: checkbox - value: false - weight: 10 - external_dns: - dns_list: - description: List of upstream DNS servers, separated by comma - label: DNS list - type: text - value: 8.8.8.8, 8.8.4.4 - weight: 10 - metadata: - label: Upstream DNS - weight: 90 - external_ntp: - metadata: - label: Upstream NTP - weight: 100 - ntp_list: - description: List of upstream NTP servers, separated by comma - label: NTP servers list - type: text - value: 0.pool.ntp.org, 1.pool.ntp.org - weight: 10 - kernel_params: - kernel: - description: Default kernel parameters - label: Initial parameters - type: text - value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset - weight: 45 - metadata: - label: Kernel parameters - weight: 40 - neutron_mellanox: - metadata: - enabled: true - label: Mellanox Neutron components - toggleable: false - weight: 50 - plugin: - label: Mellanox drivers and SR-IOV plugin - type: radio - value: disabled - values: - - data: disabled - description: If selected, Mellanox drivers, Neutron and Cinder plugin will - not be installed. - label: Mellanox drivers and plugins disabled - restrictions: - - settings:storage.iser.value == true - - data: drivers_only - description: If selected, Mellanox Ethernet drivers will be installed to - support networking over Mellanox NIC. Mellanox Neutron plugin will not - be installed. - label: Install only Mellanox drivers - restrictions: - - settings:common.libvirt_type.value != 'kvm' - - data: ethernet - description: If selected, both Mellanox Ethernet drivers and Mellanox network - acceleration (Neutron) plugin will be installed. - label: Install Mellanox drivers and SR-IOV plugin - restrictions: - - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider - == 'neutron' and networking_parameters:segmentation_type == 'vlan') - weight: 60 - vf_num: - description: Note that one virtual function will be reserved to the storage - network, in case of choosing iSER. - label: Number of virtual NICs - restrictions: - - settings:neutron_mellanox.plugin.value != 'ethernet' - type: text - value: '16' - weight: 70 - nsx_plugin: - connector_type: - description: Default network transport type to use - label: NSX connector type - type: select - value: stt - values: - - data: gre - label: GRE - - data: ipsec_gre - label: GRE over IPSec - - data: stt - label: STT - - data: ipsec_stt - label: STT over IPSec - - data: bridge - label: Bridge - weight: 80 - l3_gw_service_uuid: - description: UUID for the default L3 gateway service to use with this cluster - label: L3 service UUID - regex: - error: Invalid L3 gateway service UUID - source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' - type: text - value: '' - weight: 50 - metadata: - enabled: false - label: VMware NSX - restrictions: - - action: hide - condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider - != 'nsx' - weight: 20 - nsx_controllers: - description: One or more IPv4[:port] addresses of NSX controller node, separated - by comma (e.g. 
10.30.30.2,192.168.110.254:443) - label: NSX controller endpoint - regex: - error: Invalid controller endpoints, specify valid IPv4[:port] pair - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ - type: text - value: '' - weight: 60 - nsx_password: - description: Password for Administrator - label: NSX password - regex: - error: Empty password - source: \S - type: password - value: '' - weight: 30 - nsx_username: - description: NSX administrator's username - label: NSX username - regex: - error: Empty username - source: \S - type: text - value: admin - weight: 20 - packages_url: - description: URL to NSX specific packages - label: URL to NSX bits - regex: - error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. - http://10.20.0.2/nsx) - source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ - type: text - value: '' - weight: 70 - replication_mode: - description: '' - label: NSX cluster has Service nodes - type: checkbox - value: true - weight: 90 - transport_zone_uuid: - description: UUID of the pre-existing default NSX Transport zone - label: Transport zone UUID - regex: - error: Invalid transport zone UUID - source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' - type: text - value: '' - weight: 40 - provision: - metadata: - label: Provision - restrictions: - - action: hide - condition: not ('experimental' in version:feature_groups) - weight: 80 - method: - description: Which provision method to use for this cluster. - label: Provision method - type: radio - value: cobbler - values: - - data: image - description: Copying pre-built images on a disk. - label: Image - - data: cobbler - description: Install from scratch using anaconda or debian-installer. - label: Classic (use anaconda or debian-installer) - public_network_assignment: - assign_to_all_nodes: - description: When disabled, public network will be assigned to controllers - and zabbix-server only - label: Assign public network to all nodes - type: checkbox - value: false - weight: 10 - metadata: - label: Public network assignment - restrictions: - - action: hide - condition: cluster:net_provider != 'neutron' - weight: 50 - storage: - ephemeral_ceph: - description: Configures Nova to store ephemeral volumes in RBD. This works - best if Ceph is enabled for volumes and images, too. Enables live migration - of all types of Ceph backed VMs (without this option, live migration will - only work with VMs launched from Cinder volumes). - label: Ceph RBD for ephemeral volumes (Nova) - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - type: checkbox - value: false - weight: 75 - images_ceph: - description: Configures Glance to use the Ceph RBD backend to store images. - If enabled, this option will prevent Swift from installing. - label: Ceph RBD for images (Glance) - type: checkbox - value: false - weight: 30 - images_vcenter: - description: Configures Glance to use the vCenter/ESXi backend to store images. - If enabled, this option will prevent Swift from installing. 
- label: VMWare vCenter/ESXi datastore for images (Glance) - restrictions: - - settings:common.libvirt_type.value != 'vcenter' - type: checkbox - value: false - weight: 35 - iser: - description: 'High performance block storage: Cinder volumes over iSER protocol - (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, - and will use a dedicated virtual function for the storage network.' - label: iSER protocol for volumes (Cinder) - restrictions: - - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value - != 'kvm' - type: checkbox - value: false - weight: 11 - metadata: - label: Storage - weight: 60 - objects_ceph: - description: Configures RadosGW front end for Ceph RBD. This exposes S3 and - Swift API Interfaces. If enabled, this option will prevent Swift from installing. - label: Ceph RadosGW for objects (Swift API) - restrictions: - - settings:storage.images_ceph.value == false - type: checkbox - value: false - weight: 80 - osd_pool_size: - description: Configures the default number of object replicas in Ceph. This - number must be equal to or lower than the number of deployed 'Storage - - Ceph OSD' nodes. - label: Ceph object replication factor - regex: - error: Invalid number - source: ^[1-9]\d*$ - restrictions: - - settings:common.libvirt_type.value == 'vcenter' - type: text - value: '2' - weight: 85 - vc_datacenter: - description: Inventory path to a datacenter. If you want to use ESXi host - as datastore, it should be "ha-datacenter". - label: Datacenter name - regex: - error: Empty datacenter - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 65 - vc_datastore: - description: Datastore associated with the datacenter. - label: Datastore name - regex: - error: Empty datastore - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 60 - vc_host: - description: IP Address of vCenter/ESXi - label: vCenter/ESXi IP - regex: - error: Specify valid IPv4 address - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 45 - vc_image_dir: - description: The name of the directory where the glance images will be stored - in the VMware datastore. 
- label: Datastore Images directory - regex: - error: Empty images directory - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: /openstack_glance - weight: 70 - vc_password: - description: vCenter/ESXi admin password - label: Password - regex: - error: Empty password - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: password - value: '' - weight: 55 - vc_user: - description: vCenter/ESXi admin username - label: Username - regex: - error: Empty username - source: \S - restrictions: - - action: hide - condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value - != 'vcenter' - type: text - value: '' - weight: 50 - volumes_ceph: - description: Configures Cinder to store volumes in Ceph RBD images. - label: Ceph RBD for volumes (Cinder) - restrictions: - - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value - == 'vcenter' - type: checkbox - value: false - weight: 20 - volumes_lvm: - description: Requires at least one Storage - Cinder LVM node. - label: Cinder LVM over iSCSI for volumes - restrictions: - - settings:storage.volumes_ceph.value == true - type: checkbox - value: true - weight: 10 - volumes_vmdk: - description: Configures Cinder to store volumes via VMware vCenter. - label: VMware vCenter for volumes (Cinder) - restrictions: - - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value - == true - type: checkbox - value: false - weight: 15 - syslog: - metadata: - label: Syslog - weight: 50 - syslog_port: - description: Remote syslog port - label: Port - regex: - error: Invalid Syslog port - source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - type: text - value: '514' - weight: 20 - syslog_server: - description: Remote syslog hostname - label: Hostname - type: text - value: '' - weight: 10 - syslog_transport: - label: Syslog transport protocol - type: radio - value: tcp - values: - - data: udp - description: '' - label: UDP - - data: tcp - description: '' - label: TCP - weight: 30 - vcenter: - cluster: - description: vCenter cluster name. If you have multiple clusters, use comma - to separate names - label: Cluster - regex: - error: Invalid cluster list - source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ - type: text - value: '' - weight: 40 - datastore_regex: - description: The Datastore regexp setting specifies the data stores to use - with Compute. For example, "nas.*". 
If you want to use all available datastores, - leave this field blank - label: Datastore regexp - regex: - error: Invalid datastore regexp - source: ^(\S.*\S|\S|)$ - type: text - value: '' - weight: 50 - host_ip: - description: IP Address of vCenter - label: vCenter IP - regex: - error: Specify valid IPv4 address - source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ - type: text - value: '' - weight: 10 - metadata: - label: vCenter - restrictions: - - action: hide - condition: settings:common.libvirt_type.value != 'vcenter' - weight: 20 - use_vcenter: - description: '' - label: '' - type: hidden - value: true - weight: 5 - vc_password: - description: vCenter admin password - label: Password - regex: - error: Empty password - source: \S - type: password - value: admin - weight: 30 - vc_user: - description: vCenter admin username - label: Username - regex: - error: Empty username - source: \S - type: text - value: admin - weight: 20 - vlan_interface: - description: Physical ESXi host ethernet adapter for VLAN networking (e.g. - vmnic1). If empty "vmnic0" is used by default - label: ESXi VLAN interface - restrictions: - - action: hide - condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager - != 'VlanManager' - type: text - value: '' - weight: 60 - zabbix: - metadata: - label: Zabbix Access - restrictions: - - action: hide - condition: not ('experimental' in version:feature_groups) - weight: 70 - password: - description: Password for Zabbix Administrator - label: password - type: password - value: zabbix - weight: 20 - username: - description: Username for Zabbix Administrator - label: username - type: text - value: admin - weight: 10 diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml deleted file mode 100644 index ce61e53..0000000 --- a/fuel/deploy/libvirt/dha.yaml +++ /dev/null @@ -1,80 +0,0 @@ -title: Deployment Hardware Adapter (DHA) -# DHA API version supported -version: 1.1 -created: Sat Apr 25 16:26:22 UTC 2015 -comment: Small libvirt setup - -# Adapter to use for this definition -adapter: libvirt - -# Node list. -# Mandatory fields are id and role. -# The MAC address of the PXE boot interface is not mandatory -# to be set, but the field must be present. -# All other fields are adapter specific. - -nodes: -- id: 1 - pxeMac: 52:54:00:aa:dd:84 - libvirtName: controller1 - libvirtTemplate: controller - role: controller -- id: 2 - pxeMac: 52:54:00:aa:dd:84 - libvirtName: controller2 - libvirtTemplate: controller - role: controller -- id: 3 - pxeMac: 52:54:00:aa:dd:84 - libvirtName: controller3 - libvirtTemplate: controller - role: controller -- id: 4 - pxeMac: 52:54:00:41:64:f3 - libvirtName: compute1 - libvirtTemplate: compute - role: compute -- id: 5 - pxeMac: 52:54:00:69:a0:79 - libvirtName: compute2 - libvirtTemplate: compute - role: compute -- id: 6 - pxeMac: 52:54:00:69:a0:79 - libvirtName: compute3 - libvirtTemplate: compute - role: compute -- id: 7 - pxeMac: 52:54:00:f8:b0:75 - libvirtName: fuel-master - libvirtTemplate: fuel-master - isFuel: yes - nodeCanZeroMBR: yes - nodeCanSetBootOrderLive: yes - username: root - password: r00tme - -disks: - fuel: 30G - controller: 30G - compute: 30G - -# Deployment power on strategy -# all: Turn on all nodes at once. There will be no correlation -# between the DHA and DEA node numbering. MAC addresses -# will be used to select the node roles though. 
-# sequence: Turn on the nodes in sequence starting with the lowest order -# node and wait for the node to be detected by Fuel. Not until -# the node has been detected and assigned a role will the next -# node be turned on. -powerOnStrategy: all - -# If fuelCustomInstall is set to true, Fuel is assumed to be installed by -# calling the DHA adapter function "dha_fuelCustomInstall()" with two -# arguments: node ID and the ISO file name to deploy. The custom install -# function is then to handle all necessary logic to boot the Fuel master -# from the ISO and then return. -# Allowed values: true, false - -fuelCustomInstall: false - diff --git a/fuel/deploy/libvirt/networks/fuel1 b/fuel/deploy/libvirt/networks/fuel1 deleted file mode 100644 index 7b2b154..0000000 --- a/fuel/deploy/libvirt/networks/fuel1 +++ /dev/null @@ -1,12 +0,0 @@ - - fuel1 - - - - - - - - - - diff --git a/fuel/deploy/libvirt/networks/fuel1.xml b/fuel/deploy/libvirt/networks/fuel1.xml new file mode 100644 index 0000000..7b2b154 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel1.xml @@ -0,0 +1,12 @@ + + fuel1 + + + + + + + + + + diff --git a/fuel/deploy/libvirt/networks/fuel2 b/fuel/deploy/libvirt/networks/fuel2 deleted file mode 100644 index 615c920..0000000 --- a/fuel/deploy/libvirt/networks/fuel2 +++ /dev/null @@ -1,5 +0,0 @@ - - fuel2 - - - diff --git a/fuel/deploy/libvirt/networks/fuel2.xml b/fuel/deploy/libvirt/networks/fuel2.xml new file mode 100644 index 0000000..615c920 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel2.xml @@ -0,0 +1,5 @@ + + fuel2 + + + diff --git a/fuel/deploy/libvirt/networks/fuel3 b/fuel/deploy/libvirt/networks/fuel3 deleted file mode 100644 index 2383e6c..0000000 --- a/fuel/deploy/libvirt/networks/fuel3 +++ /dev/null @@ -1,5 +0,0 @@ - - fuel3 - - - diff --git a/fuel/deploy/libvirt/networks/fuel3.xml b/fuel/deploy/libvirt/networks/fuel3.xml new file mode 100644 index 0000000..2383e6c --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel3.xml @@ -0,0 +1,5 @@ + + fuel3 + + + diff --git a/fuel/deploy/libvirt/networks/fuel4 b/fuel/deploy/libvirt/networks/fuel4 deleted file mode 100644 index 5b69f91..0000000 --- a/fuel/deploy/libvirt/networks/fuel4 +++ /dev/null @@ -1,12 +0,0 @@ - - fuel4 - - - - - - - - - - diff --git a/fuel/deploy/libvirt/networks/fuel4.xml b/fuel/deploy/libvirt/networks/fuel4.xml new file mode 100644 index 0000000..5b69f91 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel4.xml @@ -0,0 +1,12 @@ + + fuel4 + + + + + + + + + + diff --git a/fuel/deploy/libvirt/vms/compute b/fuel/deploy/libvirt/vms/compute deleted file mode 100644 index 7591509..0000000 --- a/fuel/deploy/libvirt/vms/compute +++ /dev/null @@ -1,91 +0,0 @@ - - compute4 - 8388608 - 8388608 - 2 - - hvm - - - - - - - - - - - SandyBridge - Intel - - - - - - - - - - - - - - - - - - - - - - - - - - destroy - restart - restart - - /usr/bin/kvm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/fuel/deploy/libvirt/vms/compute.xml b/fuel/deploy/libvirt/vms/compute.xml new file mode 100644 index 0000000..2ea35ac --- /dev/null +++ b/fuel/deploy/libvirt/vms/compute.xml @@ -0,0 +1,91 @@ + + compute + 8388608 + 8388608 + 2 + + hvm + + + + + + + + + + + SandyBridge + Intel + + + + + + + + + + + + + + + + + + + + + + + + + + destroy + restart + restart + + /usr/bin/kvm + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fuel/deploy/libvirt/vms/controller b/fuel/deploy/libvirt/vms/controller deleted file mode 100644 index a871262..0000000 --- 
a/fuel/deploy/libvirt/vms/controller +++ /dev/null @@ -1,90 +0,0 @@ - - controller1 - 2097152 - 2097152 - 2 - - hvm - - - - - - - - - - SandyBridge - Intel - - - - - - - - - - - - - - - - - - - - - - - - - - destroy - restart - restart - - /usr/bin/kvm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/fuel/deploy/libvirt/vms/controller.xml b/fuel/deploy/libvirt/vms/controller.xml new file mode 100644 index 0000000..4377879 --- /dev/null +++ b/fuel/deploy/libvirt/vms/controller.xml @@ -0,0 +1,90 @@ + + controller + 2097152 + 2097152 + 2 + + hvm + + + + + + + + + + SandyBridge + Intel + + + + + + + + + + + + + + + + + + + + + + + + + + destroy + restart + restart + + /usr/bin/kvm + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fuel/deploy/libvirt/vms/fuel-master b/fuel/deploy/libvirt/vms/fuel-master deleted file mode 100644 index f4e652b..0000000 --- a/fuel/deploy/libvirt/vms/fuel-master +++ /dev/null @@ -1,93 +0,0 @@ - - fuel-master - 2097152 - 2097152 - 2 - - /machine - - - hvm - - - - - - - - - - - SandyBridge - Intel - - - - - - - - - - - - - - - - - - - - - - - - - - destroy - restart - restart - - /usr/bin/kvm - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/fuel/deploy/libvirt/vms/fuel.xml b/fuel/deploy/libvirt/vms/fuel.xml new file mode 100644 index 0000000..1a32860 --- /dev/null +++ b/fuel/deploy/libvirt/vms/fuel.xml @@ -0,0 +1,93 @@ + + fuel + 2097152 + 2097152 + 2 + + /machine + + + hvm + + + + + + + + + + + SandyBridge + Intel + + + + + + + + + + + + + + + + + + + + + + + + + + destroy + restart + restart + + /usr/bin/kvm + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fuel/deploy/reap.py b/fuel/deploy/reap.py new file mode 100644 index 0000000..8a8681a --- /dev/null +++ b/fuel/deploy/reap.py @@ -0,0 +1,330 @@ +import common +import time +import os +import yaml +import glob +import shutil + +N = common.N +E = common.E +R = common.R +ArgParser = common.ArgParser +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +log = common.log +delete_file = common.delete_file +commafy = common.commafy + +DEA_1 = ''' +title: Deployment Environment Adapter (DEA) +# DEA API version supported +version: 1.1 +created: {date} +comment: {comment} +''' + +DHA_1 = ''' +title: Deployment Hardware Adapter (DHA) +# DHA API version supported +version: 1.1 +created: {date} +comment: {comment} + +# Adapter to use for this definition +# adapter: [ipmi|libvirt] +adapter: + +# Node list. +# Mandatory properties are id and role. +# All other properties are adapter specific. +# For Non-Fuel nodes controlled by: +# - ipmi adapter you need to provide: +# pxeMac +# ipmiIp +# ipmiUser +# ipmiPass +# - libvirt adapter you need to provide: +# libvirtName: +# libvirtTemplate: [libvirt/vms/controller.xml | libvirt/vms/compute.xml] +# +# For the Fuel Node you need to provide: +# libvirtName: +# libvirtTemplate: libvirt/vms/fuel.xml +# isFuel: yes +# username: root +# password: r00tme +''' + +DHA_2 = ''' +# Adding the Fuel node as node id {node_id} +# which may not be correct - please adjust as needed. 
+''' + +DISKS = {'fuel': '30G', + 'controller': '30G', + 'compute': '30G'} + +class Reap(object): + + def __init__(self, dea_file, dha_file, comment): + self.dea_file = dea_file + self.dha_file = dha_file + self.comment = comment + self.temp_dir = None + self.env = None + self.env_id = None + self.last_node = None + + def get_env(self): + env_list = parse(exec_cmd('fuel env')) + if len(env_list) > 1: + err('Not exactly one environment') + self.env = env_list[0] + self.env_id = self.env[E['id']] + + def download_config(self, config_type): + log('Download %s config for environment %s' + % (config_type, self.env_id)) + exec_cmd('fuel %s --env %s --download --dir %s' + % (config_type, self.env_id, self.temp_dir)) + + def write(self, file, text, newline=True): + mode = 'a' if os.path.isfile(file) else 'w' + with open(file, mode) as f: + f.write('%s%s' % (text, ('\n' if newline else ''))) + + def write_yaml(self, file, data, newline=True): + self.write(file, yaml.dump(data, default_flow_style=False).strip(), + newline) + + def get_node_by_id(self, node_list, node_id): + for node in node_list: + if node[N['id']] == node_id: + return node + + def reap_interface(self, node_id, interfaces): + interface, mac = self.get_interface(node_id) + if_name = None + if interfaces: + if_name = self.check_dict_exists(interfaces, interface) + if not if_name: + if_name = 'interfaces_%s' % str(len(interfaces) + 1) + interfaces[if_name] = interface + return if_name, mac + + def reap_transformation(self, node_id, roles, transformations): + main_role = 'controller' if 'controller' in roles else 'compute' + node_file = glob.glob('%s/deployment_%s/*%s_%s.yaml' + % (self.temp_dir, self.env_id, + main_role, node_id)) + tr_name = None + with open(node_file[0]) as f: + node_config = yaml.load(f) + transformation = node_config['network_scheme']['transformations'] + if transformations: + tr_name = self.check_dict_exists(transformations, transformation) + if not tr_name: + tr_name = 'transformations_%s' % str(len(transformations) + 1) + transformations[tr_name] = transformation + return tr_name + + def check_dict_exists(self, main_dict, dict): + for key, val in main_dict.iteritems(): + if cmp(dict, val) == 0: + return key + + def reap_nodes_interfaces_transformations(self): + node_list = parse(exec_cmd('fuel node')) + real_node_ids = [node[N['id']] for node in node_list] + real_node_ids.sort() + min_node = real_node_ids[0] + + interfaces = {} + transformations = {} + dea_nodes = [] + dha_nodes = [] + + for real_node_id in real_node_ids: + node_id = int(real_node_id) - int(min_node) + 1 + self.last_node = node_id + node = self.get_node_by_id(node_list, real_node_id) + roles = commafy(node[N['roles']]) + if not roles: + err('Fuel Node %s has no role' % real_node_id) + dea_node = {'id': node_id, + 'role': roles} + dha_node = {'id': node_id} + if_name, mac = self.reap_interface(real_node_id, interfaces) + tr_name = self.reap_transformation(real_node_id, roles, + transformations) + dea_node.update( + {'interfaces': if_name, + 'transformations': tr_name}) + + dha_node.update( + {'pxeMac': mac if mac else None, + 'ipmiIp': None, + 'ipmiUser': None, + 'ipmiPass': None, + 'libvirtName': None, + 'libvirtTemplate': None}) + + dea_nodes.append(dea_node) + dha_nodes.append(dha_node) + + self.write_yaml(self.dha_file, {'nodes': dha_nodes}, False) + self.write_yaml(self.dea_file, {'nodes': dea_nodes}) + self.write_yaml(self.dea_file, {'interfaces': interfaces}) + self.write_yaml(self.dea_file, {'transformations': transformations}) + 
self.reap_fuel_node_info() + self.write_yaml(self.dha_file, {'disks': DISKS}) + + def reap_fuel_node_info(self): + dha_nodes = [] + dha_node = { + 'id': self.last_node + 1, + 'libvirtName': None, + 'libvirtTemplate': None, + 'isFuel': True, + 'username': 'root', + 'password': 'r00tme'} + + dha_nodes.append(dha_node) + + self.write(self.dha_file, DHA_2.format(node_id=dha_node['id']), False) + self.write_yaml(self.dha_file, dha_nodes) + + def reap_environment_info(self): + self.write_yaml(self.dea_file, + {'environment_name': self.env[E['name']]}) + self.write_yaml(self.dea_file, + {'environment_mode': self.env[E['mode']]}) + wanted_release = None + rel_list = parse(exec_cmd('fuel release')) + for rel in rel_list: + if rel[R['id']] == self.env[E['release_id']]: + wanted_release = rel[R['name']] + self.write_yaml(self.dea_file, {'wanted_release': wanted_release}) + + def reap_fuel_settings(self): + data = self.read_yaml('/etc/fuel/astute.yaml') + fuel = {} + del(data['ADMIN_NETWORK']['mac']) + del(data['ADMIN_NETWORK']['interface']) + for key in ['ADMIN_NETWORK', 'HOSTNAME', 'DNS_DOMAIN', 'DNS_SEARCH', + 'DNS_UPSTREAM', 'NTP1', 'NTP2', 'NTP3', 'FUEL_ACCESS']: + fuel[key] = data[key] + self.write_yaml(self.dea_file, {'fuel': fuel}) + + def reap_network_settings(self): + network_file = ('%s/network_%s.yaml' + % (self.temp_dir, self.env_id)) + data = self.read_yaml(network_file) + network = {} + network['networking_parameters'] = data['networking_parameters'] + network['networks'] = data['networks'] + for net in network['networks']: + del net['id'] + del net['group_id'] + self.write_yaml(self.dea_file, {'network': network}) + + def reap_settings(self): + settings_file = '%s/settings_%s.yaml' % (self.temp_dir, self.env_id) + settings = self.read_yaml(settings_file) + self.write_yaml(self.dea_file, {'settings': settings}) + + def get_opnfv_astute(self, role): + node_files = glob.glob('%s/deployment_%s/*%s*.yaml' + % (self.temp_dir, self.env_id, role)) + node_config = self.read_yaml(node_files[0]) + return node_config['opnfv'] if 'opnfv' in node_config else {} + + def reap_opnfv_astute(self): + controller_opnfv_astute = self.get_opnfv_astute('controller') + compute_opnfv_astute = self.get_opnfv_astute('compute') + opnfv = {} + opnfv['opnfv'] = { + 'controller': controller_opnfv_astute, + 'compute': compute_opnfv_astute} + self.write_yaml(self.dea_file, opnfv) + + def get_interface(self, real_node_id): + exec_cmd('fuel node --node-id %s --network --download --dir %s' + % (real_node_id, self.temp_dir)) + interface_file = ('%s/node_%s/interfaces.yaml' + % (self.temp_dir, real_node_id)) + interfaces = self.read_yaml(interface_file) + interface_config = {} + pxe_mac = None + for interface in interfaces: + networks = [] + for network in interface['assigned_networks']: + networks.append(network['name']) + if network['name'] == 'fuelweb_admin': + pxe_mac = interface['mac'] + if networks: + interface_config[interface['name']] = networks + return interface_config, pxe_mac + + def read_yaml(self, yaml_file): + with open(yaml_file) as f: + data = yaml.load(f) + return data + + def intro(self): + delete_file(self.dea_file) + delete_file(self.dha_file) + self.temp_dir = exec_cmd('mktemp -d') + date = time.strftime('%c') + self.write(self.dea_file, + DEA_1.format(date=date, comment=self.comment), False) + self.write(self.dha_file, + DHA_1.format(date=date, comment=self.comment)) + self.get_env() + self.download_config('deployment') + self.download_config('settings') + self.download_config('network') + + def 
finale(self): + log('DEA file is available at %s' % self.dea_file) + log('DHA file is available at %s (this is just a template)' + % self.dha_file) + shutil.rmtree(self.temp_dir) + + def reap(self): + self.intro() + self.reap_environment_info() + self.reap_nodes_interfaces_transformations() + self.reap_fuel_settings() + self.reap_opnfv_astute() + self.reap_network_settings() + self.reap_settings() + self.finale() + +def usage(): + print ''' + Usage: + python reap.py + ''' + +def parse_arguments(): + parser = ArgParser(prog='python %s' % __file__) + parser.add_argument('dea_file', nargs='?', action='store', + default='dea.yaml', + help='Deployment Environment Adapter: dea.yaml') + parser.add_argument('dha_file', nargs='?', action='store', + default='dha.yaml', + help='Deployment Hardware Adapter: dha.yaml') + parser.add_argument('comment', nargs='?', action='store', help='Comment') + args = parser.parse_args() + return (args.dea_file, args.dha_file, args.comment) + +def main(): + dea_file, dha_file, comment = parse_arguments() + + r = Reap(dea_file, dha_file, comment) + r.reap() + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py deleted file mode 100644 index 4e0e7ba..0000000 --- a/fuel/deploy/setup_environment.py +++ /dev/null @@ -1,165 +0,0 @@ -import sys -from lxml import etree -import os -import glob -import common - -from dha import DeploymentHardwareAdapter - -exec_cmd = common.exec_cmd -err = common.err -log = common.log -check_dir_exists = common.check_dir_exists -check_file_exists = common.check_file_exists -check_if_root = common.check_if_root - - -class LibvirtEnvironment(object): - - def __init__(self, storage_dir, dha_file): - self.dha = DeploymentHardwareAdapter(dha_file) - self.storage_dir = storage_dir - self.parser = etree.XMLParser(remove_blank_text=True) - self.file_dir = os.path.dirname(os.path.realpath(__file__)) - self.network_dir = '%s/libvirt/networks' % self.file_dir - self.vm_dir = '%s/libvirt/vms' % self.file_dir - self.node_ids = self.dha.get_all_node_ids() - self.fuel_node_id = self.dha.get_fuel_node_id() - self.net_names = self.collect_net_names() - - def create_storage(self, node_id, disk_path, disk_sizes): - if node_id == self.fuel_node_id: - disk_size = disk_sizes['fuel'] - else: - role = self.dha.get_node_role(node_id) - disk_size = disk_sizes[role] - exec_cmd('fallocate -l %s %s' % (disk_size, disk_path)) - - def create_vms(self): - temp_dir = exec_cmd('mktemp -d') - disk_sizes = self.dha.get_disks() - for node_id in self.node_ids: - vm_name = self.dha.get_node_property(node_id, 'libvirtName') - vm_template = self.dha.get_node_property(node_id, - 'libvirtTemplate') - disk_path = '%s/%s.raw' % (self.storage_dir, vm_name) - self.create_storage(node_id, disk_path, disk_sizes) - self.define_vm(vm_name, vm_template, temp_dir, disk_path) - exec_cmd('rm -fr %s' % temp_dir) - - def define_vm(self, vm_name, vm_template, temp_dir, disk_path): - log('Creating VM %s with disks %s' % (vm_name, disk_path)) - temp_vm_file = '%s/%s' % (temp_dir, vm_name) - exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file)) - with open(temp_vm_file) as f: - vm_xml = etree.parse(f) - names = vm_xml.xpath('/domain/name') - for name in names: - name.text = vm_name - uuids = vm_xml.xpath('/domain/uuid') - for uuid in uuids: - uuid.getparent().remove(uuid) - disks = vm_xml.xpath('/domain/devices/disk') - for disk in disks: - sources = disk.xpath('source') - for source in sources: - 
source.set('file', disk_path) - with open(temp_vm_file, 'w') as f: - vm_xml.write(f, pretty_print=True, xml_declaration=True) - exec_cmd('virsh define %s' % temp_vm_file) - - def create_networks(self): - for net_file in glob.glob('%s/*' % self.network_dir): - exec_cmd('virsh net-define %s' % net_file) - for net in self.net_names: - log('Creating network %s' % net) - exec_cmd('virsh net-autostart %s' % net) - exec_cmd('virsh net-start %s' % net) - - def delete_networks(self): - for net in self.net_names: - log('Deleting network %s' % net) - exec_cmd('virsh net-destroy %s' % net, False) - exec_cmd('virsh net-undefine %s' % net, False) - - def get_net_name(self, net_file): - with open(net_file) as f: - net_xml = etree.parse(f) - name_list = net_xml.xpath('/network/name') - for name in name_list: - net_name = name.text - return net_name - - def collect_net_names(self): - net_list = [] - for net_file in glob.glob('%s/*' % self.network_dir): - name = self.get_net_name(net_file) - net_list.append(name) - return net_list - - def delete_vms(self): - for node_id in self.node_ids: - vm_name = self.dha.get_node_property(node_id, 'libvirtName') - r, c = exec_cmd('virsh dumpxml %s' % vm_name, False) - if c > 0: - log(r) - continue - self.undefine_vm_delete_disk(r, vm_name) - - def undefine_vm_delete_disk(self, printout, vm_name): - disk_files = [] - xml_dump = etree.fromstring(printout, self.parser) - disks = xml_dump.xpath('/domain/devices/disk') - for disk in disks: - sources = disk.xpath('source') - for source in sources: - source_file = source.get('file') - if source_file: - disk_files.append(source_file) - log('Deleting VM %s with disks %s' % (vm_name, disk_files)) - exec_cmd('virsh destroy %s' % vm_name, False) - exec_cmd('virsh undefine %s' % vm_name, False) - for file in disk_files: - exec_cmd('rm -f %s' % file) - - def setup_environment(self): - check_if_root() - check_dir_exists(self.network_dir) - check_dir_exists(self.vm_dir) - self.cleanup_environment() - self.create_vms() - self.create_networks() - - def cleanup_environment(self): - self.delete_vms() - self.delete_networks() - - -def usage(): - print ''' - Usage: - python setup_environment.py - - Example: - python setup_environment.py /mnt/images dha.yaml - ''' - -def parse_arguments(): - if len(sys.argv) != 3: - log('Incorrect number of arguments') - usage() - sys.exit(1) - storage_dir = sys.argv[-2] - dha_file = sys.argv[-1] - check_dir_exists(storage_dir) - check_file_exists(dha_file) - return storage_dir, dha_file - -def main(): - storage_dir, dha_file = parse_arguments() - - virt = LibvirtEnvironment(storage_dir, dha_file) - virt.setup_environment() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/fuel/deploy/setup_execution_environment.py b/fuel/deploy/setup_execution_environment.py new file mode 100644 index 0000000..d97fcde --- /dev/null +++ b/fuel/deploy/setup_execution_environment.py @@ -0,0 +1,36 @@ +import yaml +import io +import sys +import os + +import common +from environments.libvirt_environment import LibvirtEnvironment +from environments.virtual_fuel import VirtualFuel +from dea import DeploymentEnvironmentAdapter + +exec_cmd = common.exec_cmd +err = common.err +log = common.log +check_dir_exists = common.check_dir_exists +check_file_exists = common.check_file_exists +check_if_root = common.check_if_root +ArgParser = common.ArgParser + +class ExecutionEnvironment(object): + def __new__(cls, storage_dir, pxe_bridge, dha_path, dea): + + with io.open(dha_path) as yaml_file: + dha_struct = 
yaml.load(yaml_file) + + type = dha_struct['adapter'] + + root_dir = os.path.dirname(os.path.realpath(__file__)) + + if cls is ExecutionEnvironment: + if type == 'libvirt': + return LibvirtEnvironment(storage_dir, dha_path, dea, root_dir) + + if type == 'ipmi' or type == 'hp': + return VirtualFuel(storage_dir, pxe_bridge, dha_path, root_dir) + + return super(ExecutionEnvironment, cls).__new__(cls) diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py deleted file mode 100644 index 65ee013..0000000 --- a/fuel/deploy/setup_vfuel.py +++ /dev/null @@ -1,143 +0,0 @@ -import sys -from lxml import etree -import os - -import common -from dha import DeploymentHardwareAdapter - -exec_cmd = common.exec_cmd -err = common.err -log = common.log -check_dir_exists = common.check_dir_exists -check_file_exists = common.check_file_exists -check_if_root = common.check_if_root - -VFUELNET = ''' -iface vfuelnet inet static - bridge_ports em1 - address 10.40.0.1 - netmask 255.255.255.0 - pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet" - pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet" - post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE -m comment --comment "vfuelnet" - post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet" -''' -VM_DIR = 'baremetal/vm' -FUEL_DISK_SIZE = '30G' -IFACE = 'vfuelnet' -INTERFACE_CONFIG = '/etc/network/interfaces' - -class VFuel(object): - - def __init__(self, storage_dir, dha_file): - self.dha = DeploymentHardwareAdapter(dha_file) - self.storage_dir = storage_dir - self.parser = etree.XMLParser(remove_blank_text=True) - self.fuel_node_id = self.dha.get_fuel_node_id() - self.file_dir = os.path.dirname(os.path.realpath(__file__)) - self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR) - - def setup_environment(self): - check_if_root() - check_dir_exists(self.vm_dir) - self.setup_networking() - self.delete_vm() - self.create_vm() - - def setup_networking(self): - with open(INTERFACE_CONFIG) as f: - data = f.read() - if VFUELNET not in data: - log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET)) - with open(INTERFACE_CONFIG, 'a') as f: - f.write('\n%s\n' % VFUELNET) - if exec_cmd('ip link show | grep %s' % IFACE): - log('Bring DOWN interface %s' % IFACE) - exec_cmd('ifdown %s' % IFACE, False) - log('Bring UP interface %s' % IFACE) - exec_cmd('ifup %s' % IFACE, False) - - def delete_vm(self): - vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName') - r, c = exec_cmd('virsh dumpxml %s' % vm_name, False) - if c > 0: - log(r) - return - self.undefine_vm_delete_disk(r, vm_name) - - def undefine_vm_delete_disk(self, printout, vm_name): - disk_files = [] - xml_dump = etree.fromstring(printout, self.parser) - disks = xml_dump.xpath('/domain/devices/disk') - for disk in disks: - sources = disk.xpath('source') - for source in sources: - source_file = source.get('file') - if source_file: - disk_files.append(source_file) - log('Deleting VM %s with disks %s' % (vm_name, disk_files)) - exec_cmd('virsh destroy %s' % vm_name, False) - exec_cmd('virsh undefine %s' % vm_name, False) - for file in disk_files: - exec_cmd('rm -f %s' % file) - - def create_vm(self): - temp_dir = exec_cmd('mktemp -d') - vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName') - vm_template = self.dha.get_node_property(self.fuel_node_id, - 'libvirtTemplate') - disk_path = 
'%s/%s.raw' % (self.storage_dir, vm_name)
-        exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
-        self.define_vm(vm_name, vm_template, temp_dir, disk_path)
-        exec_cmd('rm -fr %s' % temp_dir)
-
-    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
-        log('Creating VM %s with disks %s' % (vm_name, disk_path))
-        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
-        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
-        with open(temp_vm_file) as f:
-            vm_xml = etree.parse(f)
-        names = vm_xml.xpath('/domain/name')
-        for name in names:
-            name.text = vm_name
-        uuids = vm_xml.xpath('/domain/uuid')
-        for uuid in uuids:
-            uuid.getparent().remove(uuid)
-        disks = vm_xml.xpath('/domain/devices/disk')
-        for disk in disks:
-            sources = disk.xpath('source')
-            for source in sources:
-                source.set('file', disk_path)
-        with open(temp_vm_file, 'w') as f:
-            vm_xml.write(f, pretty_print=True, xml_declaration=True)
-        exec_cmd('virsh define %s' % temp_vm_file)
-
-
-def usage():
-    print '''
-    Usage:
-    python setup_vfuel.py <storage_dir> <dha_file>
-
-    Example:
-    python setup_vfuel.py /mnt/images dha.yaml
-    '''
-
-def parse_arguments():
-    if len(sys.argv) != 3:
-        log('Incorrect number of arguments')
-        usage()
-        sys.exit(1)
-    storage_dir = sys.argv[-2]
-    dha_file = sys.argv[-1]
-    check_dir_exists(storage_dir)
-    check_file_exists(dha_file)
-    return storage_dir, dha_file
-
-def main():
-    storage_dir, dha_file = parse_arguments()
-
-    vfuel = VFuel(storage_dir, dha_file)
-    vfuel.setup_environment()
-
-if __name__ == '__main__':
-    main()
diff --git a/fuel/deploy/ssh_client.py b/fuel/deploy/ssh_client.py
index 9ea227a..8bf87bc 100644
--- a/fuel/deploy/ssh_client.py
+++ b/fuel/deploy/ssh_client.py
@@ -6,6 +6,7 @@ TIMEOUT = 600
 log = common.log
 err = common.err
 
+
 class SSHClient(object):
 
     def __init__(self, host, username, password):
@@ -18,7 +19,8 @@ class SSHClient(object):
         self.client = paramiko.SSHClient()
         self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.client.connect(self.host, username=self.username,
-                            password=self.password, timeout=timeout)
+                            password=self.password, look_for_keys=False,
+                            timeout=timeout)
 
     def close(self):
         if self.client is not None:
@@ -60,16 +62,14 @@ class SSHClient(object):
         if chan.recv_ready():
             data = chan.recv(1024)
             while data:
-                print data
+                log(data.strip())
                 data = chan.recv(1024)
         if chan.recv_stderr_ready():
             error_buff = chan.recv_stderr(1024)
             while error_buff:
-                print error_buff
+                log(error_buff.strip())
                 error_buff = chan.recv_stderr(1024)
-        exit_status = chan.recv_exit_status()
-        log('Exit status %s' % exit_status)
 
     def scp_get(self, remote, local='.', dir=False):
         try:
-- cgit 1.2.3-korg

From be75ed95cc956e1ef634d3878148701c21d15b5a Mon Sep 17 00:00:00 2001
From: randyl
Date: Wed, 17 Jun 2015 15:23:45 -0600
Subject: Fixing verification of vbox drivers

JIRA: BGS-74
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With the Foreman install, the vboxdrv module is not loaded; the deploy.sh
script then attempts to load the driver by running
‘/etc/init.d/vboxdrv setup’ and checks the return code for success.
However, /etc/init.d/vboxdrv will return 0 as long as a valid parameter
was passed on the command line. In the case of a failure, a failure
message will be logged but the return code will still be 0. For instance,
if the kvm module is already loaded, the VirtualBox driver will never
install.

deploy.sh will now check for the VirtualBox kernel module with lsmod
after the setup script has run, and will exit if the vboxdrv module is
not loaded.
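In shell terms, the new verification amounts to roughly the following (an
illustrative sketch only; the "grep -q" form and the error text here are
assumptions, the actual change is in the hunk below):

    # Run the setup script, then check lsmod directly instead of
    # trusting the script's exit code, which can be 0 on failure.
    sudo /etc/init.d/vboxdrv setup
    if ! lsmod | grep -q vboxdrv; then
        echo 'vboxdrv kernel module is not loaded' >&2
        exit 1
    fi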
The deploy.sh will now exit if the vboxdrv is not loaded. Change-Id: I702819cbf28afb08e0035e08918390af85c07674 Signed-off-by: randyl --- foreman/ci/deploy.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 86f03a7..31d41d2 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -225,7 +225,8 @@ fi ##install kmod-VirtualBox if ! lsmod | grep vboxdrv; then - if ! sudo /etc/init.d/vboxdrv setup; then + sudo /etc/init.d/vboxdrv setup + if ! lsmod | grep vboxdrv; then printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 exit 1 fi -- cgit 1.2.3-korg From 49b17cd12b10327ca8848c37746768aa6c804827 Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Thu, 18 Jun 2015 11:32:57 +0200 Subject: Catching exit status from remote deployment process JIRA: [BGS-2] Create Fuel deployment script Change-Id: I21997df2534ef3cb0ae9ed47a01e6625b8404af9 Signed-off-by: Szilard Cserey --- fuel/deploy/deploy.py | 7 ++++--- fuel/deploy/deploy_env.py | 6 ++++-- fuel/deploy/ssh_client.py | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py index 3305aed..402d0f2 100644 --- a/fuel/deploy/deploy.py +++ b/fuel/deploy/deploy.py @@ -2,6 +2,7 @@ import os import shutil import io import re +import sys import netaddr import uuid import yaml @@ -138,7 +139,7 @@ class AutoDeploy(object): def deploy_env(self): dep = CloudDeploy(self.dha, self.fuel_conf['ip'], self.fuel_username, self.fuel_password, self.dea_file, WORK_DIR) - dep.deploy() + return dep.deploy() def setup_execution_environment(self): exec_env = ExecutionEnvironment(self.storage_dir, self.pxe_bridge, @@ -157,7 +158,7 @@ class AutoDeploy(object): self.create_tmp_dir() self.install_fuel_master() shutil.rmtree(self.tmp_dir) - self.deploy_env() + return self.deploy_env() def check_bridge(pxe_bridge, dha_path): with io.open(dha_path) as yaml_file: @@ -215,7 +216,7 @@ def main(): d = AutoDeploy(without_fuel, storage_dir, pxe_bridge, iso_file, dea_file, dha_file) - d.deploy() + sys.exit(d.deploy()) if __name__ == '__main__': main() \ No newline at end of file diff --git a/fuel/deploy/deploy_env.py b/fuel/deploy/deploy_env.py index 48aec18..084f37e 100644 --- a/fuel/deploy/deploy_env.py +++ b/fuel/deploy/deploy_env.py @@ -68,7 +68,9 @@ class CloudDeploy(object): dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file)) macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file)) with self.ssh as s: - s.run('python %s %s %s' % (deploy_app, dea_file, macs_file)) + status = s.run('python %s %s %s' + % (deploy_app, dea_file, macs_file)) + return status def deploy(self): @@ -84,4 +86,4 @@ class CloudDeploy(object): self.upload_cloud_deployment_files() - self.run_cloud_deploy(CLOUD_DEPLOY_FILE) + return self.run_cloud_deploy(CLOUD_DEPLOY_FILE) diff --git a/fuel/deploy/ssh_client.py b/fuel/deploy/ssh_client.py index 8bf87bc..0ec2edc 100644 --- a/fuel/deploy/ssh_client.py +++ b/fuel/deploy/ssh_client.py @@ -70,6 +70,7 @@ class SSHClient(object): while error_buff: log(error_buff.strip()) error_buff = chan.recv_stderr(1024) + return chan.recv_exit_status() def scp_get(self, remote, local='.', dir=False): try: -- cgit 1.2.3-korg From 28aafb5b284bc572212ec013ef3cfc9a0f91ba2e Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Thu, 18 Jun 2015 17:05:03 +0200 Subject: Updating ci deploy script + argument parsing improvements JIRA: [BGS-2] Create Fuel deployment script Change-Id: 
I891d574a5f8593a83edc89bb145bde90943c593f Signed-off-by: Szilard Cserey --- fuel/ci/deploy.sh | 17 +++++++---------- fuel/deploy/README.txt | 34 +++++++++++++++++----------------- fuel/deploy/deploy.py | 7 +++---- fuel/deploy/install_fuel_master.py | 2 +- 4 files changed, 28 insertions(+), 32 deletions(-) diff --git a/fuel/ci/deploy.sh b/fuel/ci/deploy.sh index df23249..5923f5c 100755 --- a/fuel/ci/deploy.sh +++ b/fuel/ci/deploy.sh @@ -1,12 +1,9 @@ -#!/bin/bash -x -set -o xtrace -set -o errexit -set -o nounset -set -o pipefail +#!/bin/bash +topdir=$(dirname $(readlink -f $BASH_SOURCE)) +deploydir=$(cd ${topdir}/../deploy; pwd) -WORKSPACE=$(readlink -e ..) -ISO_LOCATION="$(readlink -f $(find $WORKSPACE -iname 'fuel*iso' -type f))" -INTERFACE="fuel" +pushd ${deploydir} > /dev/null +echo -e "python deploy.py $@\n" +python deploy.py $@ +popd > /dev/null -cd "${WORKSPACE}/deploy" -./deploy_fuel.sh "$ISO_LOCATION" $INTERFACE 2>&1 | tee deploy_fuel.log diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt index 6f322d0..33baff1 100644 --- a/fuel/deploy/README.txt +++ b/fuel/deploy/README.txt @@ -55,32 +55,32 @@ you will have to modify them according to your needs --- Step.2 Run Autodeployment: -usage: python deploy.py [-h] [-nf] - [iso_file] dea_file dha_file [storage_dir] - [pxe_bridge] +usage: python deploy.py [-h] [-nf] [-s [STORAGE_DIR]] [-b [PXE_BRIDGE]] + [iso_file] dea_file dha_file positional arguments: - iso_file ISO File [default: OPNFV.iso] - dea_file Deployment Environment Adapter: dea.yaml - dha_file Deployment Hardware Adapter: dha.yaml - storage_dir Storage Directory [default: images] - pxe_bridge Linux Bridge for booting up the Fuel Master VM [default: pxebr] + iso_file ISO File [default: OPNFV.iso] + dea_file Deployment Environment Adapter: dea.yaml + dha_file Deployment Hardware Adapter: dha.yaml optional arguments: - -h, --help show this help message and exit - -nf Do not install Fuel Master (and Node VMs when using libvirt) + -h, --help show this help message and exit + -nf Do not install Fuel Master (and Node VMs when using + libvirt) + -s [STORAGE_DIR] Storage Directory [default: images] + -b [PXE_BRIDGE] Linux Bridge for booting up the Fuel Master VM [default: + pxebr] * WARNING: -If is not specified, Autodeployment will use -"/images" as default, and it will create it, -if it hasn't been created before +If optional argument -s is not specified, Autodeployment will use +"/images" as default, and it will create it, if it hasn't been created before -If is not specified, Autodeployment will use "pxebr" as default, +If optional argument -b is not specified, Autodeployment will use "pxebr" as default, if the bridge does not exist, the application will terminate with an error message -IF is not specified, Autodeployment will use "/OPNFV.iso" +IF optional argument is not specified, Autodeployment will use "/OPNFV.iso" as default, if the iso file does not exist, the application will terminate with an error message is not required for Autodeployment in virtual environment, even if it is specified @@ -91,12 +91,12 @@ it will not be used at all - Install Fuel Master and deploy OPNFV Cloud from scratch on Baremetal Environment -sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml /mnt/images pxebr +sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/baremetal/dea.yaml ~/CONF/baremetal/dha.yaml -s /mnt/images -b pxebr - Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual Environment -sudo python deploy.py ~/ISO/opnfv.iso 
~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml /mnt/images +sudo python deploy.py ~/ISO/opnfv.iso ~/CONF/virtual/dea.yaml ~/CONF/virtual/dha.yaml -s /mnt/images diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py index 402d0f2..33c6f9f 100644 --- a/fuel/deploy/deploy.py +++ b/fuel/deploy/deploy.py @@ -184,15 +184,16 @@ def parse_arguments(): help='Deployment Environment Adapter: dea.yaml') parser.add_argument('dha_file', action='store', help='Deployment Hardware Adapter: dha.yaml') - parser.add_argument('storage_dir', nargs='?', action='store', + parser.add_argument('-s', dest='storage_dir', action='store', default='%s/images' % CWD, help='Storage Directory [default: images]') - parser.add_argument('pxe_bridge', nargs='?', action='store', + parser.add_argument('-b', dest='pxe_bridge', action='store', default='pxebr', help='Linux Bridge for booting up the Fuel Master VM ' '[default: pxebr]') args = parser.parse_args() + log(args) check_file_exists(args.dea_file) check_file_exists(args.dha_file) @@ -202,8 +203,6 @@ def parse_arguments(): check_file_exists(args.iso_file) log('Using image directory: %s' % args.storage_dir) create_dir_if_not_exists(args.storage_dir) - log('Using bridge %s to boot up Fuel Master VM on it' - % args.pxe_bridge) check_bridge(args.pxe_bridge, args.dha_file) return (args.without_fuel, args.storage_dir, args.pxe_bridge, diff --git a/fuel/deploy/install_fuel_master.py b/fuel/deploy/install_fuel_master.py index ea24ff0..b9b7809 100644 --- a/fuel/deploy/install_fuel_master.py +++ b/fuel/deploy/install_fuel_master.py @@ -138,7 +138,7 @@ class InstallFuelMaster(object): self.work_dir, os.path.basename(self.dea_file))) def wait_until_installation_completed(self): - WAIT_LOOP = 320 + WAIT_LOOP = 360 SLEEP_TIME = 10 CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN -- cgit 1.2.3-korg From a7c763a679eab63a8887bb85158ebdd38213e9a0 Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Thu, 18 Jun 2015 22:04:25 +0200 Subject: Fix typo: conatiner => container Change-Id: If7a29b0c88a7009ef7587a460ecfeff4ba3f3661 Signed-off-by: Szilard Cserey --- common/puppet-opnfv/manifests/odl_docker.pp | 2 +- .../puppet/modules/opnfv/manifests/odl_lith_docker.pp | 2 +- .../puppet/modules/opnfv/scripts/start_odl_container.sh | 4 ++-- fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp | 2 +- .../f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/common/puppet-opnfv/manifests/odl_docker.pp b/common/puppet-opnfv/manifests/odl_docker.pp index 6e70ba0..fdbbe67 100644 --- a/common/puppet-opnfv/manifests/odl_docker.pp +++ b/common/puppet-opnfv/manifests/odl_docker.pp @@ -39,7 +39,7 @@ class opnfv::odl_docker mode => 750, } - file { "/opt/opnfv/odl/start_odl_conatiner.sh": + file { "/opt/opnfv/odl/start_odl_container.sh": ensure => present, source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh", mode => 750, diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp index cd243ef..e456180 100644 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp @@ -34,7 +34,7 @@ class opnfv::odl_lith_docker mode => 750, } - file { '/opt/opnfv/odl/start_odl_conatiner.sh': + file { '/opt/opnfv/odl/start_odl_container.sh': ensure => present, source => 
'/etc/puppet/modules/opnfv/scripts/start_odl_container.sh', mode => 750, diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh index 347ac74..7b91f4f 100755 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh +++ b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh @@ -2,7 +2,7 @@ # Ericsson Canada Inc. # Authoer: Daniel Smith # -# A helper script to install and setup the ODL docker conatiner on the controller +# A helper script to install and setup the ODL docker container on the controller # # # Inputs: odl_docker_image.tar @@ -86,7 +86,7 @@ then echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)" $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel /bin/bash else - echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)" + echo "Starting Container in Daemon mode - no shell will be provided and docker attach will not provide shell)" $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel echo "should see the process listed here in docker ps -a" $LOCALPATH/$DOCKERBINNAME ps -a; diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp index c286127..56f0216 100644 --- a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp +++ b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp @@ -30,7 +30,7 @@ class opnfv::odl_docker mode => 750, } - file { '/opt/opnfv/odl/start_odl_conatiner.sh': + file { '/opt/opnfv/odl/start_odl_container.sh': ensure => present, source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh', mode => 750, diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh index 347ac74..7b91f4f 100755 --- a/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh +++ b/fuel/build/f_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh @@ -2,7 +2,7 @@ # Ericsson Canada Inc. 
# Authoer: Daniel Smith # -# A helper script to install and setup the ODL docker conatiner on the controller +# A helper script to install and setup the ODL docker container on the controller # # # Inputs: odl_docker_image.tar @@ -86,7 +86,7 @@ then echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)" $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel /bin/bash else - echo "Starting Conatiner in Daemon mode - no shell will be provided and docker attach will not provide shell)" + echo "Starting Container in Daemon mode - no shell will be provided and docker attach will not provide shell)" $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel echo "should see the process listed here in docker ps -a" $LOCALPATH/$DOCKERBINNAME ps -a; -- cgit 1.2.3-korg From 213fecf4e1822758797f29d0df2f0c5351cd38f9 Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Fri, 19 Jun 2015 18:11:34 +0200 Subject: Fixing network configuration bug in dea.yaml it kept failing whenever I tried to deploy in HA mode JIRA: [BGS-2] Create Fuel deployment script Change-Id: I94f2b49ef4caf6674c636568601aac69c0339617 Signed-off-by: Szilard Cserey --- fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml | 2 +- fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml | 2 +- fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml | 2 +- fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml index 2528229..8aafc9a 100644 --- a/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/ha/dea.yaml @@ -186,7 +186,7 @@ network: gateway: 172.30.9.1 ip_ranges: - - 172.30.9.70 - - 172.30.9.70 + - 172.30.9.79 meta: assign_vip: true cidr: 172.16.0.0/24 diff --git a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml index 2387443..5a93e96 100644 --- a/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml +++ b/fuel/deploy/baremetal/conf/linux_foundation_lab/multinode/dea.yaml @@ -186,7 +186,7 @@ network: gateway: 172.30.9.1 ip_ranges: - - 172.30.9.70 - - 172.30.9.70 + - 172.30.9.79 meta: assign_vip: true cidr: 172.16.0.0/24 diff --git a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml index 9e70427..25de4b9 100644 --- a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml +++ b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_ha.yaml @@ -205,7 +205,7 @@ network: gateway: 172.30.9.1 ip_ranges: - - 172.30.9.70 - - 172.30.9.70 + - 172.30.9.79 meta: assign_vip: true cidr: 172.16.0.0/24 diff --git a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml index fd0e7b3..3abbdce 
100644 --- a/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml +++ b/fuel/prototypes/auto-deploy/configs/lf_pod1/dea_no-ha.yaml @@ -205,7 +205,7 @@ network: gateway: 172.30.9.1 ip_ranges: - - 172.30.9.70 - - 172.30.9.70 + - 172.30.9.79 meta: assign_vip: true cidr: 172.16.0.0/24 -- cgit 1.2.3-korg From 244397911ce3a00ac94a6a1b40af2b3c90597855 Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Mon, 22 Jun 2015 17:37:50 +0200 Subject: Fetching exit code from deploy.py in deploy.sh JIRA: [BGS-2] Create Fuel deployment script Change-Id: I9b4f7f27fc8ad271fc9bb37f4cb1910af5068a73 Signed-off-by: Szilard Cserey --- fuel/ci/deploy.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fuel/ci/deploy.sh b/fuel/ci/deploy.sh index 5923f5c..d5b70d0 100755 --- a/fuel/ci/deploy.sh +++ b/fuel/ci/deploy.sh @@ -1,9 +1,8 @@ #!/bin/bash +set -o errexit topdir=$(dirname $(readlink -f $BASH_SOURCE)) deploydir=$(cd ${topdir}/../deploy; pwd) - pushd ${deploydir} > /dev/null echo -e "python deploy.py $@\n" python deploy.py $@ -popd > /dev/null - +popd > /dev/null \ No newline at end of file -- cgit 1.2.3-korg From 9d303fdfce261b429de8e710be0a19787626b69f Mon Sep 17 00:00:00 2001 From: Daniel Farrell Date: Wed, 24 Jun 2015 10:41:47 -0400 Subject: Add ODL Deployment stack docs to Foreman guide JIRA: BGS-59 Change-Id: Ia2541f7b56077f2ce95acc4dd042d72bb8cbbf2c Signed-off-by: Daniel Farrell --- foreman/docs/src/installation-instructions.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst index 2ac872d..19c526b 100644 --- a/foreman/docs/src/installation-instructions.rst +++ b/foreman/docs/src/installation-instructions.rst @@ -353,7 +353,11 @@ OpenStack OpenDaylight ------------ -`OpenDaylight artifacts `_ +Upstream OpenDaylight provides `a number of packaging and deployment options `_ meant for consumption by downstream projects like OPNFV. + +Currently, OPNFV Foreman uses `OpenDaylight's Puppet module `_, which in turn depends on `OpenDaylight's RPM `_. + +Note that the RPM is currently hosted on Copr, but `will soon `_ be migrated to OpenDaylight's infrastructure and/or the new CentOS NFV SIG. Foreman ------- -- cgit 1.2.3-korg From 3fea98bc7bd7f0f411c2be1be7665a569ec8fb0a Mon Sep 17 00:00:00 2001 From: randyl Date: Wed, 24 Jun 2015 12:55:53 -0600 Subject: Moved 80% of Foreman deploy.sh into functions To help with readability and troubleshooting, the 80% of the script that was not in a function were added to functions based on logical blocks. Neither functional nor order of execution changes were made to the script. Since most of the file and other Foreman CI scripts use a 2 space indent, that was also applied uniformly across the file. 
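The shape of that refactor, as a minimal standalone sketch: parse_cmdline and main "$@" appear in the actual patch below, while install_deps is a placeholder standing in for the script's real install_* phases.

    #!/bin/bash
    # Each logical block of the old straight-line script becomes a function;
    # main() preserves the original order of execution.
    parse_cmdline() {
      echo "parsing arguments: $*"
    }
    install_deps() {
      echo "installing dependencies"
    }
    main() {
      parse_cmdline "$@"   # runs first, exactly as the inline code did
      install_deps
    }
    main "$@"
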
Change-Id: I83b96e231d484813025f6aa900cf2ffc92e94397 Signed-off-by: randyl --- foreman/ci/deploy.sh | 944 +++++++++++++++++++++++++++------------------------ 1 file changed, 509 insertions(+), 435 deletions(-) diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 31d41d2..46ba80e 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -149,19 +149,21 @@ parse_yaml() { }' } -##END FUNCTIONS - -if [[ ( $1 == "--help") || $1 == "-h" ]]; then +##translates the command line paramaters into variables +##params: $@ the entire command line is passed +##usage: parse_cmd_line() "$@" +parse_cmdline() { + if [[ ( $1 == "--help") || $1 == "-h" ]]; then display_usage exit 0 -fi + fi -echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" -echo "Use -h to display help" -sleep 2 + echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" + echo "Use -h to display help" + sleep 2 -while [ "`echo $1 | cut -c1`" = "-" ] -do + while [ "`echo $1 | cut -c1`" = "-" ] + do echo $1 case "$1" in -base_config) @@ -180,31 +182,45 @@ do display_usage exit 1 ;; -esac -done + esac + done +} ##disable selinux -/sbin/setenforce 0 - -# Install EPEL repo for access to many other yum repos -# Major version is pinned to force some consistency for Arno -yum install -y epel-release-7* - -# Install other required packages -# Major versions are pinned to force some consistency for Arno -if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then - printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 - exit 1 -fi - -##install VirtualBox repo -if cat /etc/*release | grep -i "Fedora release"; then - vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch -else - vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch -fi - -cat > /etc/yum.repos.d/virtualbox.repo << EOM +##params: none +##usage: disable_selinux() +disable_selinux() { + /sbin/setenforce 0 +} + +##Install the EPEL repository and additional packages +##params: none +##usage: install_EPEL() +install_EPEL() { + # Install EPEL repo for access to many other yum repos + # Major version is pinned to force some consistency for Arno + yum install -y epel-release-7* + + # Install other required packages + # Major versions are pinned to force some consistency for Arno + if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then + printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 + exit 1 + fi +} + +##Download and install virtual box +##params: none +##usage: install_vbox() +install_vbox() { + ##install VirtualBox repo + if cat /etc/*release | grep -i "Fedora release"; then + vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch + else + vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch + fi + + cat > /etc/yum.repos.d/virtualbox.repo << EOM [virtualbox] name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox baseurl=$vboxurl @@ -215,380 +231,101 @@ skip_if_unavailable = 1 keepcache = 0 EOM -##install VirtualBox -if ! yum list installed | grep -i virtualbox; then - if ! 
yum -y install VirtualBox-4.3; then - printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 - exit 1 + ##install VirtualBox + if ! yum list installed | grep -i virtualbox; then + if ! yum -y install VirtualBox-4.3; then + printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 + exit 1 + fi fi -fi -##install kmod-VirtualBox -if ! lsmod | grep vboxdrv; then - sudo /etc/init.d/vboxdrv setup + ##install kmod-VirtualBox if ! lsmod | grep vboxdrv; then - printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' -fi - -##install Ansible -if ! yum list installed | grep -i ansible; then - if ! yum -y install ansible-1*; then - printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 - exit 1 + sudo /etc/init.d/vboxdrv setup + if ! lsmod | grep vboxdrv; then + printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' fi -fi +} -##install Vagrant -if ! rpm -qa | grep vagrant; then - if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then - printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' -fi - -##add centos 7 box to vagrant -if ! vagrant box list | grep chef/centos-7.0; then - if ! vagrant box add chef/centos-7.0 --provider virtualbox; then - printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' -fi - -##install workaround for centos7 -if ! vagrant plugin list | grep vagrant-centos7_fix; then - if ! vagrant plugin install vagrant-centos7_fix; then - printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 +##install Ansible using yum +##params: none +##usage: install_anible() +install_ansible() { + if ! yum list installed | grep -i ansible; then + if ! yum -y install ansible-1*; then + printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 + exit 1 + fi fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' -fi - -cd /tmp/ - -##remove bgs vagrant incase it wasn't cleaned up -rm -rf /tmp/bgs_vagrant - -##clone bgs vagrant -##will change this to be opnfv repo when commit is done -if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 - exit 1 -fi - -cd bgs_vagrant - -echo "${blue}Detecting network configuration...${reset}" -##detect host 1 or 3 interface configuration -#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` -output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` - -if [ ! "$output" ]; then - printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 - exit 1 -fi - -##find number of interfaces with ip and substitute in VagrantFile -if_counter=0 -for interface in ${output}; do +} - if [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) - if [ ! "$interface_ip" ]; then - continue - fi - new_ip=$(next_usable_ip $interface_ip) - if [ ! 
"$new_ip" ]; then - continue - fi - interface_arr[$interface]=$if_counter - interface_ip_arr[$if_counter]=$new_ip - subnet_mask=$(find_netmask $interface) - if [ "$if_counter" -eq 1 ]; then - private_subnet_mask=$subnet_mask - private_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 2 ]; then - public_subnet_mask=$subnet_mask - public_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 3 ]; then - storage_subnet_mask=$subnet_mask - fi - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile - ((if_counter++)) -done - -##now remove interface config in Vagrantfile for 1 node -##if 1, 3, or 4 interfaces set deployment type -##if 2 interfaces remove 2nd interface and set deployment type -if [ "$if_counter" == 1 ]; then - deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 - remove_vagrant_network eth_replace3 -elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 -elif [ "$if_counter" == 3 ]; then - deployment_type="three_network" - remove_vagrant_network eth_replace3 -else - deployment_type="multi_network" -fi - -echo "${blue}Network detected: ${deployment_type}! ${reset}" - -if route | grep default; then - echo "${blue}Default Gateway Detected ${reset}" - host_default_gw=$(ip route | grep default | awk '{print $3}') - echo "${blue}Default Gateway: $host_default_gw ${reset}" - default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') - case "${interface_arr[$default_gw_interface]}" in - 0) - echo "${blue}Default Gateway Detected on Admin Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - node_default_gw=$host_default_gw - ;; - 1) - echo "${red}Default Gateway Detected on Private Interface!${reset}" - echo "${red}Private subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - 2) - echo "${blue}Default Gateway Detected on Public Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" - sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile - echo "${blue}Setting node gateway to be VM Admin IP${reset}" - node_default_gw=${interface_ip_arr[0]} - public_gateway=$default_gw - ;; - 3) - echo "${red}Default Gateway Detected on Storage Interface!${reset}" - echo "${red}Storage subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - *) - echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}" - exit 1 - ;; - esac -else - #assumes 24 bit mask - defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3` - firstip=.1 - defaultgw=$defaultgw$firstip - echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile - node_default_gw=$defaultgw -fi - -if [ $base_config ]; then - if ! cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" - exit 1 +##install Vagrant RPM directly with the bintray.com site +##params: none +##usage: install_vagrant() +install_vagrant() { + if ! 
rpm -qa | grep vagrant; then + if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then + printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' fi -fi - -if [ $no_parse ]; then -echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" - -else - -echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" -##Edit the ksgen settings appropriately -##ksgen settings will be stored in /vagrant on the vagrant machine -##if single node deployment all the variables will have the same ip -##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 -sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml - -##replace private interface parameter -##private interface will be of hosts, so we need to know the provisioned host interface name -##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts -##replace IP for parameters with next IP that will be given to controller -if [ "$deployment_type" == "single_network" ]; then - ##we also need to assign IP addresses to nodes - ##for single node, foreman is managing the single network, so we can't reserve them - ##not supporting single network anymore for now - echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}" - exit 0 - -elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then - - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + ##add centos 7 box to vagrant + if ! vagrant box list | grep chef/centos-7.0; then + if ! vagrant box add chef/centos-7.0 --provider virtualbox; then + printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' fi - sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml - - ##get ip addresses for private network on controllers to make dhcp entries - ##required for controllers_ip_array global param - next_private_ip=${interface_ip_arr[1]} - type=_private - for node in controller1 controller2 controller3; do - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 - exit 1 + ##install workaround for centos7 + if ! vagrant plugin list | grep vagrant-centos7_fix; then + if ! vagrant plugin install vagrant-centos7_fix; then + printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 fi - sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml - controller_ip_array=$controller_ip_array$next_private_ip, - done + else + printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' + fi +} - ##replace global param for contollers_ip_array - controller_ip_array=${controller_ip_array%?} - sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml - - ##now replace all the VIP variables. 
admin//private can be the same IP - ##we have to use IP's here that won't be allocated to hosts at provisioning time - ##therefore we increment the ip by 10 to make sure we have a safe buffer - next_private_ip=$(increment_ip $next_private_ip 10) - - grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 - exit 1 - fi - done - ##replace foreman site - next_public_ip=${interface_ip_arr[2]} - sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml - ##replace public vips - next_public_ip=$(increment_ip $next_public_ip 10) - grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml - next_public_ip=$(next_usable_ip $next_public_ip) - if [ ! "$next_public_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 - exit 1 - fi - done +##remove bgs vagrant incase it wasn't cleaned up +##params: none +##usage: clean_tmp() +clean_tmp() { + rm -rf /tmp/bgs_vagrant +} - ##replace public_network param - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_network param - private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) - sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml - ##replace storage_network - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml - else - next_storage_ip=${interface_ip_arr[3]} - storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) - sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml - fi +##clone bgs vagrant version 1.0 using git +##params: none +##usage: clone_bgs +clone_bgs() { + cd /tmp/ - ##replace public_subnet param - public_subnet=$public_subnet'\'$public_short_subnet_mask - sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_subnet param - private_subnet=$private_subnet'\'$private_short_subnet_mask - sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml - - ##replace public_dns param to be foreman server - sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml - - ##replace public_gateway - if [ -z "$public_gateway" ]; then - ##if unset then we assume its the first IP in the public subnet - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - public_gateway=$(increment_subnet $public_subnet 1) - fi - sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml - - ##we have to define an allocation range of the public subnet to give - ##to neutron to use as floating IPs - ##we should control this subnet, so this range should work .150-200 - ##but generally this is a bad idea and we are assuming at least a /24 subnet here - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - 
public_allocation_start=$(increment_subnet $public_subnet 150) - public_allocation_end=$(increment_subnet $public_subnet 200) - - sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml - sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml - -else - printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2 - exit 1 -fi - -echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}" - -fi - -if [ $virtual ]; then - echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}" - sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh -fi - -echo "${blue}Starting Vagrant! ${reset}" - -##stand up vagrant -if ! vagrant up; then - printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 - exit 1 -else - echo "${blue}Foreman VM is up! ${reset}" -fi - -if [ $virtual ]; then - -##Bring up VM nodes -echo "${blue}Setting VMs up... ${reset}" -nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` -##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first -##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have -##3 static controllers -compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` -controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` -nodes=${controller_nodes}${compute_nodes} - -for node in ${nodes}; do - cd /tmp - - ##remove VM nodes incase it wasn't cleaned up - rm -rf /tmp/$node - - ##clone bgs vagrant ##will change this to be opnfv repo when commit is done - if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then + if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi +} - cd $node - - if [ $base_config ]; then - if ! cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" - exit 1 - fi - fi - - ##parse yaml into variables - eval $(parse_yaml opnfv_ksgen_settings.yml "config_") - ##find node type - node_type=config_nodes_${node}_type - node_type=$(eval echo \$$node_type) +##validates the netork settings and update VagrantFile with network settings +##params: none +##usage: configure_network() +configure_network() { + cd /tmp/bgs_vagrant - ##find number of interfaces with ip and substitute in VagrantFile + echo "${blue}Detecting network configuration...${reset}" + ##detect host 1 or 3 interface configuration + #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` if [ ! "$output" ]; then @@ -596,7 +333,7 @@ for node in ${nodes}; do exit 1 fi - + ##find number of interfaces with ip and substitute in VagrantFile if_counter=0 for interface in ${output}; do @@ -607,36 +344,25 @@ for node in ${nodes}; do if [ ! "$interface_ip" ]; then continue fi - case "${if_counter}" in - 0) - mac_string=config_nodes_${node}_mac_address - mac_addr=$(eval echo \$$mac_string) - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - if [ $mac_addr == "" ]; then - echo "${red} Unable to find mac_address for $node! 
${reset}" - exit 1 - fi - ;; - 1) - if [ "$node_type" == "controller" ]; then - mac_string=config_nodes_${node}_private_mac - mac_addr=$(eval echo \$$mac_string) - if [ $mac_addr == "" ]; then - echo "${red} Unable to find private_mac for $node! ${reset}" - exit 1 - fi - else - ##generate random mac - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - fi - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - *) - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - esac - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + new_ip=$(next_usable_ip $interface_ip) + if [ ! "$new_ip" ]; then + continue + fi + interface_arr[$interface]=$if_counter + interface_ip_arr[$if_counter]=$new_ip + subnet_mask=$(find_netmask $interface) + if [ "$if_counter" -eq 1 ]; then + private_subnet_mask=$subnet_mask + private_short_subnet_mask=$(find_short_netmask $interface) + fi + if [ "$if_counter" -eq 2 ]; then + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) + fi + if [ "$if_counter" -eq 3 ]; then + storage_subnet_mask=$subnet_mask + fi + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile ((if_counter++)) done @@ -660,36 +386,384 @@ for node in ${nodes}; do deployment_type="multi_network" fi - ##modify provisioning to do puppet install, config, and foreman check-in - ##substitute host_name and dns_server in the provisioning script - host_string=config_nodes_${node}_hostname - host_name=$(eval echo \$$host_string) - sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh - ##dns server should be the foreman server - sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + echo "${blue}Network detected: ${deployment_type}! 
${reset}" + + if route | grep default; then + echo "${blue}Default Gateway Detected ${reset}" + host_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $host_default_gw ${reset}" + default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') + case "${interface_arr[$default_gw_interface]}" in + 0) + echo "${blue}Default Gateway Detected on Admin Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + node_default_gw=$host_default_gw + ;; + 1) + echo "${red}Default Gateway Detected on Private Interface!${reset}" + echo "${red}Private subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + 2) + echo "${blue}Default Gateway Detected on Public Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" + sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile + echo "${blue}Setting node gateway to be VM Admin IP${reset}" + node_default_gw=${interface_ip_arr[0]} + public_gateway=$default_gw + ;; + 3) + echo "${red}Default Gateway Detected on Storage Interface!${reset}" + echo "${red}Storage subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + *) + echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}" + exit 1 + ;; + esac + else + #assumes 24 bit mask + defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3` + firstip=.1 + defaultgw=$defaultgw$firstip + echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile + node_default_gw=$defaultgw + fi + + if [ $base_config ]; then + if ! cp -f $base_config opnfv_ksgen_settings.yml; then + echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" + exit 1 + fi + fi + + if [ $no_parse ]; then + echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" + + else + + echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" + ##Edit the ksgen settings appropriately + ##ksgen settings will be stored in /vagrant on the vagrant machine + ##if single node deployment all the variables will have the same ip + ##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 + + sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml + + ##replace private interface parameter + ##private interface will be of hosts, so we need to know the provisioned host interface name + ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts + ##replace IP for parameters with next IP that will be given to controller + if [ "$deployment_type" == "single_network" ]; then + ##we also need to assign IP addresses to nodes + ##for single node, foreman is managing the single network, so we can't reserve them + ##not supporting single network anymore for now + echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. 
${reset}" + exit 0 + + elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then + + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + fi + + sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml + + ##get ip addresses for private network on controllers to make dhcp entries + ##required for controllers_ip_array global param + next_private_ip=${interface_ip_arr[1]} + type=_private + for node in controller1 controller2 controller3; do + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 + exit 1 + fi + sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml + controller_ip_array=$controller_ip_array$next_private_ip, + done + + ##replace global param for contollers_ip_array + controller_ip_array=${controller_ip_array%?} + sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml + + ##now replace all the VIP variables. admin//private can be the same IP + ##we have to use IP's here that won't be allocated to hosts at provisioning time + ##therefore we increment the ip by 10 to make sure we have a safe buffer + next_private_ip=$(increment_ip $next_private_ip 10) + + grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 + exit 1 + fi + done + + ##replace foreman site + next_public_ip=${interface_ip_arr[2]} + sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml + ##replace public vips + next_public_ip=$(increment_ip $next_public_ip 10) + grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! 
"$next_public_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 + exit 1 + fi + done + + ##replace public_network param + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_network param + private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) + sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + ##replace storage_network + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + else + next_storage_ip=${interface_ip_arr[3]} + storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) + sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml + fi + + ##replace public_subnet param + public_subnet=$public_subnet'\'$public_short_subnet_mask + sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_subnet param + private_subnet=$private_subnet'\'$private_short_subnet_mask + sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml + + ##replace public_dns param to be foreman server + sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml + + ##replace public_gateway + if [ -z "$public_gateway" ]; then + ##if unset then we assume its the first IP in the public subnet + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + public_gateway=$(increment_subnet $public_subnet 1) + fi + sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml + + ##we have to define an allocation range of the public subnet to give + ##to neutron to use as floating IPs + ##we should control this subnet, so this range should work .150-200 + ##but generally this is a bad idea and we are assuming at least a /24 subnet here + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + public_allocation_start=$(increment_subnet $public_subnet 150) + public_allocation_end=$(increment_subnet $public_subnet 200) + + sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml + sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml + + else + printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2 + exit 1 + fi - ## remove bootstrap and NAT provisioning - sed -i '/nat_setup.sh/d' Vagrantfile - sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile + echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}" - ## modify default_gw to be node_default_gw - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile + fi +} - ## modify VM memory to be 4gig - sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile +##Configure bootstrap.sh to use the virtual Khaleesi playbook +##params: none +##usage: configure_virtual() +configure_virtual() { + if [ $virtual ]; then + echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}" + sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh + fi +} - echo "${blue}Starting Vagrant Node $node! 
${reset}" +##Starts for forement VM with Vagrant +##params: none +##usage: start_vagrant() +start_foreman() { + echo "${blue}Starting Vagrant! ${reset}" ##stand up vagrant if ! vagrant up; then - echo "${red} Unable to start $node ${reset}" + printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 exit 1 else - echo "${blue} $node VM is up! ${reset}" + echo "${blue}Foreman VM is up! ${reset}" fi +} -done +##start the VM if this is a virtual installaion +##this function does nothing if baremetal servers are being used +##params: none +##usage: start_virtual_nodes() +start_virutal_nodes() { + if [ $virtual ]; then + + ##Bring up VM nodes + echo "${blue}Setting VMs up... ${reset}" + nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` + ##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first + ##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have + ##3 static controllers + compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` + controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` + nodes=${controller_nodes}${compute_nodes} + + for node in ${nodes}; do + cd /tmp + + ##remove VM nodes incase it wasn't cleaned up + rm -rf /tmp/$node + + ##clone bgs vagrant + ##will change this to be opnfv repo when commit is done + if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then + printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 + exit 1 + fi + + cd $node + + if [ $base_config ]; then + if ! cp -f $base_config opnfv_ksgen_settings.yml; then + echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" + exit 1 + fi + fi + + ##parse yaml into variables + eval $(parse_yaml opnfv_ksgen_settings.yml "config_") + ##find node type + node_type=config_nodes_${node}_type + node_type=$(eval echo \$$node_type) + + ##find number of interfaces with ip and substitute in VagrantFile + output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` + + if [ ! "$output" ]; then + printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 + exit 1 + fi + + + if_counter=0 + for interface in ${output}; do + + if [ "$if_counter" -ge 4 ]; then + break + fi + interface_ip=$(find_ip $interface) + if [ ! "$interface_ip" ]; then + continue + fi + case "${if_counter}" in + 0) + mac_string=config_nodes_${node}_mac_address + mac_addr=$(eval echo \$$mac_string) + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + if [ $mac_addr == "" ]; then + echo "${red} Unable to find mac_address for $node! ${reset}" + exit 1 + fi + ;; + 1) + if [ "$node_type" == "controller" ]; then + mac_string=config_nodes_${node}_private_mac + mac_addr=$(eval echo \$$mac_string) + if [ $mac_addr == "" ]; then + echo "${red} Unable to find private_mac for $node! 
${reset}" + exit 1 + fi + else + ##generate random mac + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + fi + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + ;; + *) + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + ;; + esac + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + ((if_counter++)) + done + + ##now remove interface config in Vagrantfile for 1 node + ##if 1, 3, or 4 interfaces set deployment type + ##if 2 interfaces remove 2nd interface and set deployment type + if [ "$if_counter" == 1 ]; then + deployment_type="single_network" + remove_vagrant_network eth_replace1 + remove_vagrant_network eth_replace2 + remove_vagrant_network eth_replace3 + elif [ "$if_counter" == 2 ]; then + deployment_type="single_network" + second_interface=`echo $output | awk '{print $2}'` + remove_vagrant_network $second_interface + remove_vagrant_network eth_replace2 + elif [ "$if_counter" == 3 ]; then + deployment_type="three_network" + remove_vagrant_network eth_replace3 + else + deployment_type="multi_network" + fi + + ##modify provisioning to do puppet install, config, and foreman check-in + ##substitute host_name and dns_server in the provisioning script + host_string=config_nodes_${node}_hostname + host_name=$(eval echo \$$host_string) + sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh + ##dns server should be the foreman server + sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + + ## remove bootstrap and NAT provisioning + sed -i '/nat_setup.sh/d' Vagrantfile + sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile + + ## modify default_gw to be node_default_gw + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile + + ## modify VM memory to be 4gig + sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile + + echo "${blue}Starting Vagrant Node $node! ${reset}" + + ##stand up vagrant + if ! vagrant up; then + echo "${red} Unable to start $node ${reset}" + exit 1 + else + echo "${blue} $node VM is up! ${reset}" + fi + + done + + echo "${blue} All VMs are UP! ${reset}" + + fi +} - echo "${blue} All VMs are UP! ${reset}" +##END FUNCTIONS + +main() { + parse_cmdline "$@" + disable_selinux + install_EPEL + install_vbox + install_ansible + install_vagrant + clean_tmp + clone_bgs + configure_network + configure_virtual + start_foreman + start_virutal_nodes +} -fi +main "$@" -- cgit 1.2.3-korg From b678795a66c1cab612cf548ef10a033060e3ff27 Mon Sep 17 00:00:00 2001 From: Jonas Bjurel Date: Fri, 26 Jun 2015 09:03:36 +0000 Subject: Revert "Adding in support for Lithium container." since it doesnt deploy This reverts commit eb887812da568cfb4908f6ae14449b2ceaeb5bc0. 
From b678795a66c1cab612cf548ef10a033060e3ff27 Mon Sep 17 00:00:00 2001
From: Jonas Bjurel
Date: Fri, 26 Jun 2015 09:03:36 +0000
Subject: Revert "Adding in support for Lithium container." since it doesn't deploy

This reverts commit eb887812da568cfb4908f6ae14449b2ceaeb5bc0.

Change-Id: Ia7490d86c1d91abca1f50d90f43bdf3e4917f23a
---
 .../templates/Lithium_rc0/dockerfile/Dockerfile    |  82 --------
 .../dockerfile/container_scripts/check_feature.sh  |  18 --
 .../dockerfile/container_scripts/speak.sh          |  20 --
 .../start_odl_docker_container.sh                  |  48 -----
 fuel/build/Makefile                                |   2 -
 fuel/build/f_lith_odl_docker/Makefile              |  52 -----
 fuel/build/f_lith_odl_docker/dockerfile/Dockerfile |  72 -------
 .../dockerfile/container_scripts/check_feature.sh  |   8 -
 .../dockerfile/container_scripts/speak.sh          |  17 --
 .../container_scripts/start_odl_docker.sh          |  38 ----
 .../modules/opnfv/manifests/odl_lith_docker.pp     |  81 --------
 .../Lithium_rc0/container_scripts/check_feature.sh |  18 --
 .../Lithium_rc0/container_scripts/speak.sh         |  20 --
 .../start_odl_docker_container.sh                  |  48 -----
 .../puppet/modules/opnfv/scripts/change.sh         | 219 ---------------------
 .../puppet/modules/opnfv/scripts/config_net_odl.sh | 192 ------------------
 .../puppet/modules/opnfv/scripts/stage_odl.sh      |  54 -----
 .../modules/opnfv/scripts/start_odl_container.sh   |  95 ---------
 .../f_lith_odl_docker/scripts/config_net_odl.sh    | 164 ---------------
 .../scripts/config_neutron_for_odl.sh              | 146 --------------
 .../f_lith_odl_docker/scripts/prep_nets_for_odl.sh |  90 ---------
 .../f_lith_odl_docker/scripts/setup_ovs_for_odl.sh |  23 ---
 .../puppet/modules/opnfv/manifests/init.pp         |   2 -
 23 files changed, 1509 deletions(-)
 delete mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile
 delete mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh
 delete mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh
 delete mode 100644 common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/Makefile
 delete mode 100755 fuel/build/f_lith_odl_docker/dockerfile/Dockerfile
 delete mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp
 delete mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh
 delete mode 100755 fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh
 delete mode 100644 fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh

diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile
b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile deleted file mode 100644 index 6d7535d..0000000 --- a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/Dockerfile +++ /dev/null @@ -1,82 +0,0 @@ -#################################################################### -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# DOCKER FILE FOR LITHIUM ODL RC0 Testing -# -############################################################################# - - -#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff -FROM ubuntu:12.04 - -# Maintainer Info -MAINTAINER Daniel Smith - - -#Run apt-get update one start just to check for updates when building -RUN echo "Updating APT" -RUN apt-get update -RUN echo "Adding wget" -RUN apt-get install -y wget -RUN apt-get install -y net-tools -RUN apt-get install -y openjdk-7-jre -RUN apt-get install -y openjdk-7-jdk -RUN apt-get install -y openssh-server -RUN apt-get install -y vim -RUN apt-get install -y expect -RUN apt-get install -y daemontools -RUN mkdir -p /opt/odl_source/lithium -RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc' - - - -#Now lets got and fetch the ODL distribution -RUN echo "Fetching Lithium Rc0" -RUN wget https://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/org/opendaylight/integration/distribution-karaf/0.3.0-SNAPSHOT/distribution-karaf-0.3.0-20150612.144348-2492.tar.gz -O /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz - -RUN echo "Untarring ODL inplace" -RUN mkdir -p /opt/odl/lithium -RUN tar zxvf /opt/odl_source/lithium/distribution-karaf-0.3.0-Lithium-RC0.tar.gz -C /opt/odl/lithium - -RUN echo "Installing DLUX and other features into ODL" -#COPY dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh -COPY container_scripts/start_odl_docker_container.sh /etc/init.d/ -COPY container_scripts/speak.sh /etc/init.d/ -#COPY dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh -RUN chmod 777 /etc/init.d/start_odl_docker_container.sh -RUN chmod 777 /etc/init.d/speak.sh - - - -# Expose the ports - -# PORTS FOR BASE SYSTEM AND DLUX -EXPOSE 8101 -EXPOSE 6633 -EXPOSE 1099 -EXPOSE 43506 -EXPOSE 8181 -EXPOSE 8185 -EXPOSE 9000 -EXPOSE 39378 -EXPOSE 33714 -EXPOSE 44444 -EXPOSE 6653 - -# PORTS FOR OVSDB AND ODL CONTROL -EXPOSE 12001 -EXPOSE 6640 -EXPOSE 8080 -EXPOSE 7800 -EXPOSE 55130 -EXPOSE 52150 -EXPOSE 36826 - -# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable -CMD ["/etc/init.d/start_odl_docker_container.sh"] diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh deleted file mode 100644 index 04d7b53..0000000 --- a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/check_feature.sh +++ /dev/null @@ -1,18 +0,0 @@ -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. 
-# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -#!/usr/bin/expect -spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client -expect "root>" -send "feature:list | grep -i odl-restconf\r" -send "\r\r\r" -expect "root>" -send "logout\r" - - diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh deleted file mode 100644 index a7d0e6c..0000000 --- a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/speak.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/expect -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB -# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY -################################################################################# - -spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client -expect "root>" -send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" -send "\r\r\r" -expect "root>" -send "logout\r" diff --git a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh b/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh deleted file mode 100644 index 96a40ec..0000000 --- a/common/puppet-opnfv/manifests/templates/Lithium_rc0/dockerfile/container_scripts/start_odl_docker_container.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB -# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY -################################################################################# -# Start up script for calling karaf / ODL inside a docker container. -# -# This script will also call a couple expect scripts to load the feature set that we want - - -#ENV -export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 - -#MAIN -echo "Starting up the da Sheilds..." 
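# The fixed sleep below is a timing guess; a steadier alternative would poll
# for the web port before proceeding, e.g. (a sketch, not part of the
# original script):
#   until netstat -lnt | grep -q ':8181 '; do sleep 2; done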
-/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server & -echo "Sleeping 5 bad hack" -sleep 10 -echo "should see stuff listening now" -netstat -na -echo " should see proess running for karaf" -ps -efa -echo " Starting the packages we want" -/etc/init.d/speak.sh -echo "Printout the status - if its right, you should see 8181 appear now" -netstat -na -ps -efa - - - -## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute -## Cheap - but effective -while true; -do - echo "Checking status of ODL:" - /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status - sleep 60 -done - - diff --git a/fuel/build/Makefile b/fuel/build/Makefile index 6c98ed9..5f63120 100644 --- a/fuel/build/Makefile +++ b/fuel/build/Makefile @@ -43,7 +43,6 @@ SUBDIRS += f_l23network SUBDIRS += f_resolvconf SUBDIRS += f_ntp SUBDIRS += f_odl_docker -SUBDIRS += f_lith_odl_docker #SUBDIRS += f_odl # f_example is only an example of how to generate a .deb package and @@ -65,7 +64,6 @@ all: @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE) @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE) $(MAKE) -C f_odl_docker -f Makefile all - $(MAKE) -C f_lith_odl_docker -f Makefile all @make -C docker @docker/runcontext $(DOCKERIMG) $(MAKE) $(MAKEFLAGS) iso diff --git a/fuel/build/f_lith_odl_docker/Makefile b/fuel/build/f_lith_odl_docker/Makefile deleted file mode 100755 index e89da94..0000000 --- a/fuel/build/f_lith_odl_docker/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -TOP := $(shell pwd) -BUILDTAG := robust_stefan -RELEASE := Lithium_rc0 - -# Edit this to match the GENESIS / OPNFV in your environment -export OPNFV_PUPPET := $(BUILD_BASE)/../../common/puppet-opnfv -include ../config.mk - -.PHONY: all -all: - @mkdir -p puppet/modules/opnfv/odl_docker/${RELEASE} - @rm -rf tmp - @mkdir -p tmp - @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile tmp/. - @docker build -t ${BUILDTAG} tmp/dockerfile/. - @docker save ${BUILDTAG} > puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar - @wget ${DOCKER_REPO}/${DOCKER_TAG} -O puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest - @echo "OPFNV_PUPPET is: ${OPNFV_PUPPET}" - @cp -Rvp ${OPNFV_PUPPET}/manifests/templates/${RELEASE}/dockerfile/container_scripts puppet/modules/opnfv/odl_docker/${RELEASE}/. - -.PHONY: clean -clean: - @rm -rf tmp - @rm -rf release - -.PHONY: build-clean -build-clean: - @rm -rf tmp - @rm -rf release - @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/odl_docker_image.tar - @rm -rf puppet/modules/opnfv/odl_docker/${RELEASE}/docker-latest - -.PHONY: validate-cache -validate-cache: - @echo "No cache validation schema available for $(shell pwd)" - @echo "Continuing ..." 
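# For reference, the all: target above reduces to this shell sequence (a
# sketch, with BUILDTAG and RELEASE expanded to the values defined at the
# top of the Makefile):
#   docker build -t robust_stefan tmp/dockerfile/.
#   docker save robust_stefan > puppet/modules/opnfv/odl_docker/Lithium_rc0/odl_docker_image.tar
# The image is built once at ISO build time and shipped as a tarball, to be
# loaded on the target with `docker load` (see stage_odl.sh later on).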
- -.PHONY: release -release: - # Fetch PP from OPNFV Common - @cp -Rvp ${OPNFV_PUPPET}/manifests/odl_docker.pp ${PUPPET_DEST} - @cp -Rvp puppet/modules/* $(PUPPET_DEST) diff --git a/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile b/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile deleted file mode 100755 index e3c7ee5..0000000 --- a/fuel/build/f_lith_odl_docker/dockerfile/Dockerfile +++ /dev/null @@ -1,72 +0,0 @@ -#################################################################### -# -# Dockerfile to build a ODL (Karaf) Docker Container -# -# Copyright daniel.smith@ericsson.com -# License: Apache GPL -# -#################################################################### - - -#Set the base image - note: the current release of Karaf is using Jdk7 and alot of 12.04, so we will use it rather than 14.04 and backport a ton of stuff -FROM ubuntu:12.04 - -# Maintainer Info -MAINTAINER Daniel Smith - -#Run apt-get update one start just to check for updates when building -RUN echo "Updating APT" -RUN apt-get update -RUN echo "Adding wget" -RUN apt-get install -y wget -RUN apt-get install -y net-tools -RUN apt-get install -y openjdk-7-jre -RUN apt-get install -y openjdk-7-jdk -RUN apt-get install -y openssh-server -RUN apt-get install -y vim -RUN apt-get install -y expect -RUN apt-get install -y daemontools -RUN mkdir -p /opt/odl_source -RUN bash -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64" >> ~/.bashrc' - - -#Now lets got and fetch the ODL distribution -RUN echo "Fetching ODL" -RUN wget https://nexus.opendaylight.org/content/groups/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3/distribution-karaf-0.2.3-Helium-SR3.tar.gz -O /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz - -RUN echo "Untarring ODL inplace" -RUN mkdir -p /opt/odl -RUN tar zxvf /opt/odl_source/distribution-karaf-0.2.3-Helium-SR3.tar.gz -C /opt/odl - -RUN echo "Installing DLUX and other features into ODL" -COPY tmp/dockerfile/container_scripts/start_odl_docker.sh /etc/init.d/start_odl_docker.sh -COPY tmp/dockerfile/container_scripts/speak.sh /etc/init.d/speak.sh -RUN chmod 777 /etc/init.d/start_odl_docker.sh -RUN chmod 777 /etc/init.d/speak.sh - - -# Expose the ports -# PORTS FOR BASE SYSTEM AND DLUX -EXPOSE 8101 -EXPOSE 6633 -EXPOSE 1099 -EXPOSE 43506 -EXPOSE 8181 -EXPOSE 8185 -EXPOSE 9000 -EXPOSE 39378 -EXPOSE 33714 -EXPOSE 44444 -EXPOSE 6653 - -# PORTS FOR OVSDB AND ODL CONTROL -EXPOSE 12001 -EXPOSE 6640 -EXPOSE 8080 -EXPOSE 7800 -EXPOSE 55130 -EXPOSE 52150 -EXPOSE 36826 - -# set the ENTRYPOINT - An entry point allows us to run this container as an exectuable -CMD ["/etc/init.d/start_odl_docker.sh"] diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh deleted file mode 100755 index 3e5d0b2..0000000 --- a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/check_feature.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/expect -spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client -expect "root>" -send "feature:list | grep -i odl-restconf\r" -send "\r\r\r" -expect "root>" -send "logout\r" - diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh deleted file mode 100755 index 3ba07a8..0000000 --- a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/speak.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/expect -# Ericsson Research Canada -# -# Author: Daniel 
Smith -# -# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB -# -# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY -# DEPRECATED AFTER ARNO - -spawn /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/client -expect "root>" -send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" -send "\r\r\r" -expect "root>" -send "logout\r" - diff --git a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh b/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh deleted file mode 100755 index 1c72dda..0000000 --- a/fuel/build/f_lith_odl_docker/dockerfile/container_scripts/start_odl_docker.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# Ericsson Research Canada -# -# Author: Daniel Smith -# -# Start up script for calling karaf / ODL inside a docker container. -# -# This script will also call a couple expect scripts to load the feature set that we want - - -#ENV -export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 - -#MAIN -echo "Starting up the da Sheilds..." -/opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/karaf server & -echo "Sleeping 5 bad hack" -sleep 10 -echo "should see stuff listening now" -netstat -na -echo " should see proess running for karaf" -ps -efa -echo " Starting the packages we want" -/etc/init.d/speak.sh -echo "Printout the status - if its right, you should see 8181 appear now" -netstat -na -ps -efa - - - -## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute -## Cheap - but effective -while true; -do - echo "Checking status of ODL:" - /opt/odl/distribution-karaf-0.2.3-Helium-SR3/bin/status - sleep 60 -done diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp deleted file mode 100644 index e456180..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/manifests/odl_lith_docker.pp +++ /dev/null @@ -1,81 +0,0 @@ -class opnfv::odl_lith_docker -{ - case $::fuel_settings['role'] { - /controller/: { - - file { '/opt': - ensure => 'directory', - } - - file { '/opt/opnfv': - ensure => 'directory', - owner => 'root', - group => 'root', - mode => 777, - } - - file { '/opt/opnfv/odl': - ensure => 'directory', - } - - file { '/opt/opnfv/odl/lithium': - ensure => 'directory', - } - - file { '/opt/opnfv/odl/lithium/odl_docker_image.tar': - ensure => present, - source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/odl_docker_image.tar', - mode => 750, - } - - file { '/opt/opnfv/odl/lithium/docker-latest': - ensure => present, - source => '/etc/puppet/modules/opnfv/odl_docker/Lithium_rc0/docker-latest', - mode => 750, - } - - file { '/opt/opnfv/odl/start_odl_container.sh': - ensure => present, - source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh', - mode => 750, - } - file { '/opt/opnfv/odl/stage_odl.sh': - ensure => present, - source => '/etc/puppet/modules/opnfv/scripts/stage_odl.sh', - mode => 750, - } - file { '/opt/opnfv/odl/config_net_odl.sh': - ensure => present, - source => '/etc/puppet/modules/opnfv/scripts/config_net_odl.sh', - mode => 750, - } - file { '/opt/opnfv/odl/change.sh': - ensure => present, - source => '/etc/puppet/modules/opnfv/scripts/change.sh', - mode => 750, - } - - - # fix failed to find the cgroup root issue - # 
https://github.com/docker/docker/issues/8791 - case $::operatingsystem { - 'ubuntu': { - package {'cgroup-lite': - ensure => present, - } - - service {'cgroup-lite': - ensure => running, - enable => true, - require => Package['cgroup-lite'], - } - } - 'centos': { - package {'docker-io': - ensure => latest, - } - } - } - } - } -} diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh deleted file mode 100644 index 04d7b53..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/check_feature.sh +++ /dev/null @@ -1,18 +0,0 @@ -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -#!/usr/bin/expect -spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client -expect "root>" -send "feature:list | grep -i odl-restconf\r" -send "\r\r\r" -expect "root>" -send "logout\r" - - diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh deleted file mode 100644 index a7d0e6c..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/speak.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/expect -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB -# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY -################################################################################# - -spawn /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/client -expect "root>" -send "feature:install odl-base-all odl-aaa-authn odl-restconf odl-nsf-all odl-adsal-northbound odl-mdsal-apidocs odl-ovsdb-openstack odl-ovsdb-northbound odl-dlux-core" -send "\r\r\r" -expect "root>" -send "logout\r" diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh deleted file mode 100644 index 96a40ec..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/odl_docker/Lithium_rc0/container_scripts/start_odl_docker_container.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# daniel.smith@ericsson.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## -# -# Simple expect script to start up ODL client and load feature set for DLUX and OVSDB -# NOTE: THIS WILL BE REPLACED WITH A PROGRAMATIC METHOD SHORTLY -################################################################################# -# Start up script for calling karaf / ODL inside a docker container. -# -# This script will also call a couple expect scripts to load the feature set that we want - - -#ENV -export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64 - -#MAIN -echo "Starting up the da Sheilds..." -/opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/karaf server & -echo "Sleeping 5 bad hack" -sleep 10 -echo "should see stuff listening now" -netstat -na -echo " should see proess running for karaf" -ps -efa -echo " Starting the packages we want" -/etc/init.d/speak.sh -echo "Printout the status - if its right, you should see 8181 appear now" -netstat -na -ps -efa - - - -## This is a loop that keeps our container going currently, prinout the "status of karaf" to the docker logs every minute -## Cheap - but effective -while true; -do - echo "Checking status of ODL:" - /opt/odl/distribution-karaf-0.3.0-Lithium-RC0/bin/status - sleep 60 -done - - diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh deleted file mode 100644 index f7f3d6e..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/change.sh +++ /dev/null @@ -1,219 +0,0 @@ -#!/bin/bash -# script to remove bridges and reset networking for ODL - - -#VARS -MODE=0 -DNS=8.8.8.8 - -#ENV -source ~/openrc - -# GET IPS for that node -function get_ips { - BR_MGMT=`grep address /etc/network/ifcfg_backup/ifcfg-br-mgmt | awk -F" " '{print $2}'` - BR_STORAGE=`grep address /etc/network/ifcfg_backup/ifcfg-br-storage | awk -F" " '{print $2}'` - BR_FW_ADMIN=`grep address /etc/network/ifcfg_backup/ifcfg-br-fw-admin | awk -F" " '{print $2}'` - BR_EX=`grep address /etc/network/ifcfg_backup/ifcfg-br-ex | awk -F" " '{print $2}'` - DEF_NETMASK=255.255.255.0 - DEF_GW=172.30.9.1 -} - -function backup_ifcfg { - echo " backing up " - mkdir -p /etc/network/ifcfg_backup - mv /etc/network/interfaces.d/ifcfg-br-ex /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-br-fw-admin /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-br-mgmt /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-br-storage /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-br-prv /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-eth0 /etc/network/ifcfg_backup/. - mv /etc/network/interfaces.d/ifcfg-eth1 /etc/network/ifcfg_backup/. 
- rm -rf /etc/network/interfaces.d/ifcfg-eth1.300 - rm -rf /etc/network/interfaces.d/ifcfg-eth1.301 - rm -rf /etc/network/interfaces.d/ifcfg-eth1 - rm -rf /etc/network/interfaces.d/ifcfg-eth0 - -} - - -function create_ifcfg_br_mgmt { - echo "migrating br_mgmt" - echo "auto eth1.300" >> /etc/network/interfaces.d/ifcfg-eth1.300 - echo "iface eth1.300 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.300 - echo " address $BR_MGMT" >> /etc/network/interfaces.d/ifcfg-eth1.300 - echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.300 -} - -function create_ifcfg_br_storage { - echo "migration br_storage" - echo "auto eth1.301" >> /etc/network/interfaces.d/ifcfg-eth1.301 - echo "iface eth1.301 inet static" >> /etc/network/interfaces.d/ifcfg-eth1.301 - echo " address $BR_STORAGE" >> /etc/network/interfaces.d/ifcfg-eth1.301 - echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1.301 -} - -function create_ifcfg_br_fw_admin { - echo " migratinng br_fw_admin" - echo "auto eth1" >> /etc/network/interfaces.d/ifcfg-eth1 - echo "iface eth1 inet static" >> /etc/network/interfaces.d/ifcfg-eth1 - echo " address $BR_FW_ADMIN" >> /etc/network/interfaces.d/ifcfg-eth1 - echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth1 -} - -function create_ifcfg_eth0 { - echo "migratinng br-ex to eth0 - temporarily" - echo "auto eth0" >> /etc/network/interfaces.d/ifcfg-eth0 - echo "iface eth0 inet static" >> /etc/network/interfaces.d/ifcfg-eth0 - echo " address $BR_EX" >> /etc/network/interfaces.d/ifcfg-eth0 - echo " netmask $DEF_NETMASK" >> /etc/network/interfaces.d/ifcfg-eth0 - echo " gateway $DEF_GW" >> /etc/network/interfaces.d/ifcfg-eth0 -} - -function set_mode { - if [ -d "/var/lib/glance/images" ] - then - echo " controller " - MODE=0 - else - echo " compute " - MODE=1 - fi -} - - -function stop_ovs { - echo "Stopping OpenVSwitch" - service openvswitch-switch stop - -} - -function start_ovs { - echo "Starting OVS" - service openvswitch-switch start - ovs-vsctl show -} - - -function clean_ovs { - echo "cleaning OVS DB" - stop_ovs - rm -rf /var/log/openvswitch/* - mkdir -p /opt/opnfv/odl/ovs_back - cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/. 
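# For illustration, with BR_MGMT=192.168.0.3 (a made-up value, not taken from
# this patch) create_ifcfg_br_mgmt above would write ifcfg-eth1.300 as:
#   auto eth1.300
#   iface eth1.300 inet static
#     address 192.168.0.3
#     netmask 255.255.255.0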
- rm -rf /etc/openvswitch/conf.db - echo "restarting OVS - you should see Nothing there" - start_ovs -} - - - -function reboot_me { - reboot -} - -function allow_challenge { - sed -i -e 's/ChallengeResponseAuthentication no/ChallengeResponseAuthentication yes/g' /etc/ssh/sshd_config - service ssh restart -} - -function clean_neutron { - subnets=( `neutron subnet-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) - networks=( `neutron net-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) - ports=( `neutron port-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) - routers=( `neutron router-list | awk -F" " '{print $2}' | grep -v id | sed '/^$/d'` ) - - #display all elements - echo "SUBNETS: ${subnets[@]} " - echo "NETWORKS: ${networks[@]} " - echo "PORTS: ${ports[@]} " - echo "ROUTERS: ${routers[@]} " - - - # get port and subnet for each router - for i in "${routers[@]}" - do - routerport=( `neutron router-port-list $i | awk -F" " '{print $2}' | grep -v id | sed '/^$/d' `) - routersnet=( `neutron router-port-list $i | awk -F" " '{print $8}' | grep -v fixed | sed '/^$/d' | sed 's/,$//' | sed -e 's/^"//' -e 's/"$//' `) - done - - echo "ROUTER PORTS: ${routerport[@]} " - echo "ROUTER SUBNET: ${routersnet[@]} " - - #remove router subnets - echo "router-interface-delete" - for i in "${routersnet[@]}" - do - neutron router-interface-delete ${routers[0]} $i - done - - #remove subnets - echo "subnet-delete" - for i in "${subnets[@]}" - do - neutron subnet-delete $i - done - - #remove nets - echo "net-delete" - for i in "${networks[@]}" - do - neutron net-delete $i - done - - #remove routers - echo "router-delete" - for i in "${routers[@]}" - do - neutron router-delete $i - done - - #remove ports - echo "port-delete" - for i in "${ports[@]}" - do - neutron port-delete $i - done - - #remove subnets - echo "subnet-delete second pass" - for i in "${subnets[@]}" - do - neutron subnet-delete $i - done - -} - -function set_dns { - sed -i -e 's/nameserver 10.20.0.2/nameserver $DNS/g' /etc/resolv.conf -} - - -#OUTPUT - -function check { - echo $BR_MGMT - echo $BR_STORAGE - echo $BR_FW_ADMIN - echo $BR_EX -} - -### MAIN - - -set_mode -backup_ifcfg -get_ips -create_ifcfg_br_mgmt -create_ifcfg_br_storage -create_ifcfg_br_fw_admin -if [ $MODE == "0" ] -then - create_ifcfg_eth0 -fi -allow_challenge -clean_ovs -check -reboot_me - - diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh deleted file mode 100755 index 145da80..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/config_net_odl.sh +++ /dev/null @@ -1,192 +0,0 @@ -#!/bin/bash -# -# Author: Daniel Smith (Ericsson) -# -# Script to update neutron configuration for OVSDB/ODL integratino -# -# Usage - Set / pass CONTROL_HOST to your needs -# -### SET THIS VALUE TO MATCH YOUR SYSTEM -CONTROL_HOST=192.168.0.2 -BR_EX_IP=172.30.9.70 - -# ENV -source ~/openrc -# VARS -ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini -MODE=0 - - -# FUNCTIONS -# Update ml2_conf.ini -function update_ml2conf { - echo "Backing up and modifying ml2_conf.ini" - cp $ML2_CONF $ML2_CONF.bak - sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF - sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF - sed -i -e 's/bridge_mappings=physnet2:br-prv/bridge_mappings=physnet1:br-ex/g' $ML2_CONF - echo "[ml2_odl]" >> $ML2_CONF - echo "password = admin" >> 
$ML2_CONF - echo "username = admin" >> $ML2_CONF - echo "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF -} - -function reset_neutrondb { - echo "Reseting DB" - mysql -e "drop database if exists neutron_ml2;" - mysql -e "create database neutron_ml2 character set utf8;" - mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';" - neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head -} - -function restart_neutron { - echo "Restarting Neutron Server" - service neutron-server restart - echo "Should see Neutron runing now" - service neutron-server status - echo "Shouldnt be any nets, but should work (return empty)" - neutron net-list -} - -function stop_neutron { - echo "Stopping Neutron / OVS components" - service neutron-plugin-openvswitch-agent stop - if [ $MODE == "0" ] - then - service neutron-server stop - fi -} - -function disable_agent { - echo "Disabling Neutron Plugin Agents from running" - service neutron-plugin-openvswitch-agent stop - echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override -} - - - -function verify_ML2_working { - echo "checking that we can talk via ML2 properly" - curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2 - if grep "network" /tmp/check_ml2 - then - echo "Success - ML2 to ODL is working" - else - echo "im sorry Jim, but its dead" - fi - -} - - -function set_mode { - if [ -d "/var/lib/glance/images" ] - then - echo "Controller Mode" - MODE=0 - else - echo "Compute Mode" - MODE=1 - fi -} - -function stop_ovs { - echo "Stopping OpenVSwitch" - service openvswitch-switch stop - -} - -function start_ovs { - echo "Starting OVS" - service openvswitch-vswitch start - ovs-vsctl show -} - - -function control_setup { - echo "Modifying Controller" - stop_neutron - stop_ovs - disable_agent - rm -rf /var/log/openvswitch/* - mkdir -p /opt/opnfv/odl/ovs_back - mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/. - mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/. - rm -rf /etc/openvswitch/conf.db - rm -rf /etc/openvswitch/.conf* - service openvswitch-switch start - ovs-vsctl add-br br-ex - ovs-vsctl add-port br-ex eth0 - ovs-vsctl set interface br-ex type=external - ifconfig br-ex 172.30.9.70/24 up - service neutron-server restart - - echo "setting up networks" - ip link add link eth1 name br-mgmt type vlan id 300 - ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp - ip link add link eth1 name br-storage type vlan id 301 - ip link add link eth1 name br-prv type vlan id 1000 - ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp - ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp - - echo "Setting ODL Manager IP" - ovs-vsctl set-manager tcp:192.168.0.2:6640 - - echo "Verifying ODL ML2 plugin is working" - verify_ML2_working - - # BAD HACK - Should be parameterized - this is to catch up - route add default gw 172.30.9.1 - -} - -function clean_ovs { - echo "cleaning OVS DB" - stop_ovs - rm -rf /var/log/openvswitch/* - mkdir -p /opt/opnfv/odl/ovs_back - cp -pr /etc/openvswitch/* /opt/opnfv/odl/ovs_back/. 
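# A sturdier variant of verify_ML2_working above would check the HTTP status
# code instead of grepping the response body; a sketch against the same
# endpoint and credentials:
#   code=$(curl -s -o /dev/null -w '%{http_code}' -u admin:admin \
#     "http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks")
#   [ "$code" = "200" ] && echo "ML2 to ODL is working" || echo "HTTP $code"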
- rm -rf /etc/openvswitch/conf.db - echo "restarting OVS - you should see Nothing there" - start_ovs -} - -function compute_setup { - echo "Modifying Compute" - echo "Disabling neutron openvswitch plugin" - stop_neutron - disable_agent - ip link add link eth1 name br-mgmt type vlan id 300 - ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 up arp - ip link add link eth1 name br-storage type vlan id 301 - ip link add link eth1 name br-prv type vlan id 1000 - ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 up arp - ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-admin | awk -F" " '{print $2}'`/24 up arp - - echo "set manager, and route for ODL controller" - ovs-vsctl set-manager tcp:192.168.0.2:6640 - route add 172.17.0.1 gw 192.168.0.2 - verify_ML2_working -} - - -# MAIN -echo "Starting to make call" -update_ml2conf -echo "Check Mode" -set_mode - -if [ $MODE == "0" ]; -then - echo "Calling control setup" - control_setup -elif [ $MODE == "1" ]; -then - echo "Calling compute setup" - compute_setup - -else - echo "Something is bad - call for help" - exit -fi - - diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh deleted file mode 100755 index fa14b47..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/stage_odl.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# Author: Daniel Smith (Ericsson) -# Stages ODL Controlleer -# Inputs: odl_docker_image.tar -# Usage: ./stage_odl.sh - -# ENVS -source ~/.bashrc -source ~/openrc - -LOCALPATH=/opt/opnfv/odl -DOCKERBIN=docker-latest -ODLIMGNAME=odl_docker_image.tar -DNS=8.8.8.8 -HOST_IP=`ifconfig br-ex | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'` - - - -# DEBUG ECHOS -echo $LOCALPATH -echo $DOCKERBIN -echo $ODLIMGNAME -echo $DNS -echo $HOST_IP - - -# Set DNS to someting external and default GW - ODL requires a connection to the internet -sed -i -e 's/nameserver 10.20.0.2/nameserver 8.8.8.8/g' /etc/resolv.conf -route delete default gw 10.20.0.2 -route add default gw 172.30.9.1 - -# Start Docker daemon and in background -echo "Starting Docker" -chmod +x $LOCALPATH/$DOCKERBIN -$LOCALPATH/$DOCKERBIN -d & -#courtesy sleep for virtual env -sleep 2 - -# Import the ODL Container -echo "Importing ODL Container" -$LOCALPATH/$DOCKERBIN load -i $LOCALPATH/$ODLIMGNAME - -# Start ODL, load DLUX and OVSDB modules -echo "Removing any old install found - file not found is ok here" -$LOCALPATH/$DOCKERBIN rm odl_docker -echo "Starting up ODL controller in Daemon mode - no shell possible" -$LOCALPATH/$DOCKERBIN run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel - -# Following, you should see the docker ps listed and a port opened -echo " you should reach ODL controller at http://HOST_IP:8181/dlux/index.html" -$LOCALPATH/$DOCKERBINNAME ps -a -netstat -lnt - - diff --git a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh b/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh deleted file mode 100755 index 7b91f4f..0000000 --- a/fuel/build/f_lith_odl_docker/puppet/modules/opnfv/scripts/start_odl_container.sh +++ /dev/null 
@@ -1,95 +0,0 @@ -#!/bin/bash -# Ericsson Canada Inc. -# Authoer: Daniel Smith -# -# A helper script to install and setup the ODL docker container on the controller -# -# -# Inputs: odl_docker_image.tar -# -# Usage: ./start_odl_docker.sh -echo "DEPRECATED - USE stage_odl.sh instead - this will be removed shortly once automated deployment is working - SR1" - - -# ENVS -source ~/.bashrc -source ~/openrc - -# VARS - -# Switch for Dev mode - uses apt-get on control to cheat and get docker installed locally rather than from puppet source - -DEV=1 - -# Switch for 1:1 port mapping of EXPOSED ports in Docker to the host, if set to 0, then random ports will be used - NOTE: this doesnt work for all web services X port on Host --> Y port in Container, -# especially for SSL/HTTPS cases. Be aware. - -MATCH_PORT=1 - -LOCALPATH=/opt/opnfv/odl -DOCKERBINNAME=docker-latest -DOCKERIMAGENAME=odl_docker_image.tar -DNS=8.8.8.8 -HOST_IP=`ifconfig br-fw-admin | grep -i "inet addr" | awk -F":" '{print $2}' | awk -F" " '{print $1}'` - - -# Set this to "1" if you want to have your docker container startup into a shell - - -ENABLE_SHELL=1 - - -echo " Fetching Docker " -if [ "$DEV" -eq "1" ]; -# If testing Locally (on a control node) you can set DEV=1 to enable apt-get based install on the control node (not desired target, but good for testing). -then - echo "Dev Mode - Fetching from Internet"; - echo " this wont work in production builds"; - apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - mkdir -p $LOCALPATH - wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O $LOCALPATH/$DOCKERBINNAME - wget http://ftp.us.debian.org/debian/pool/main/d/docker.io/docker.io_1.3.3~dfsg1-2_amd64.deb - chmod 777 $LOCALPATH/$DOCKERBINNAME - echo "done "; -else - echo "Using Binaries delivered from Puppet" - echo "Starting Docker in Daemon mode" - chmod +x $LOCALPATH/$DOCKERBINNAME - $LOCALPATH/$DOCKERBINNAME -d & - - # wait until docker will be fully initialized - # before any further action against just started docker - sleep 5 -fi - - -# We need to perform some cleanup of the Openstack Environment -echo "TODO -- This should be automated in the Fuel deployment at some point" -echo "However, the timing should come after basic tests are running, since this " -echo " part will remove the subnet router association that is deployed automativally" -echo " via fuel. Refer to the ODL + Openstack Integration Page " - -# Import the ODL container into docker - -echo "Importing ODL container into docker" -$LOCALPATH/$DOCKERBINNAME load -i $LOCALPATH/$DOCKERIMAGENAME - -echo " starting up ODL - DLUX and Mapping Ports" -if [ "$MATCH_PORT" -eq "1" ] -then - echo "Starting up Docker..." 
- $LOCALPATH/$DOCKERBINNAME rm odl_docker -fi - -if [ "$ENABLE_SHELL" -eq "1" ]; -then - echo "Starting Container in Interactive Mode (/bin/bash will be provided, you will need to run ./start_odl_docker.sh inside the container yourself)" - $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -t loving_daniel /bin/bash -else - echo "Starting Container in Daemon mode - no shell will be provided and docker attach will not provide shell)" - $LOCALPATH/$DOCKERBINNAME run --name odl_docker -p 8181:8181 -p 8185:8185 -p 9000:9000 -p 1099:1099 -p 8101:8101 -p 6633:6633 -p 43506:43506 -p 44444:44444 -p 6653:6653 -p 12001:12001 -p 6400:6400 -p 6640:6640 -p 8080:8080 -p 7800:7800 -p 55130:55130 -p 52150:52150 -p 36826:26826 -i -d -t loving_daniel - echo "should see the process listed here in docker ps -a" - $LOCALPATH/$DOCKERBINNAME ps -a; - echo "Match Port enabled, you can reach the DLUX login at: " - echo "http://$HOST_IP:8181/dlux.index.html" -fi diff --git a/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh deleted file mode 100644 index d292acd..0000000 --- a/fuel/build/f_lith_odl_docker/scripts/config_net_odl.sh +++ /dev/null @@ -1,164 +0,0 @@ -#!/bin/bash -# -# Author: Daniel Smith (Ericsson) -# -# Script to update neutron configuration for OVSDB/ODL integratino -# -# Usage - Set / pass CONTROL_HOST to your needs -# -CONTROL_HOST=172.30.9.70 - -# ENV -source ~/openrc - -# VARS -ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini -MODE=0 - - -# FUNCTIONS - -# Update ml2_conf.ini -function update_ml2conf { - echo "Backing up and modifying ml2_conf.ini" - cp $ML2_CONF $ML2_CONF.bak - sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF - sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF - cat "[ml2_odl]" >> $ML2_CONF - cat "password = admin" >> $ML2_CONF - cat "username = admin" >> $ML2_CONF - cat "url = http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF -} - -function reset_neutrondb { - echo "Reseting DB" - mysql -e "drop database if exists neutron_ml2;" - mysql -e "create database neutron_ml2 character set utf8;" - mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';" - neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head -} - -function restart_neutron { - echo "Restarting Neutron Server" - service neutron-server restart - echo "Should see Neutron runing now" - service neutron-server status - echo "Shouldnt be any nets, but should work (return empty)" - neutron net-list -} - -function stop_neutron { - echo "Stopping Neutron / OVS components" - service neutron-plugin-openvswitch-agent stop - if [ $MODE == "0" ] - then - service neutron-server stop - fi -} - - - -function verify_ML2_working { - echo "checking that we can talk via ML2 properly" - curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2 - if grep "network" /tmp/check_ml2 - then - echo "Success - ML2 to ODL is working" - else - echo "im sorry Jim, but its dead" - fi - -} - - -function set_mode { - if ls -l /var/lib/glance/images - then - echo "Controller Mode" - MODE=0 - else - echo "Compute Mode" - MODE=1 - fi -} - -function stop_ovs { - echo 
"Stopping OpenVSwitch" - service openvswitch-switch stop - -} - -function control_setup { - echo "Modifying Controller" - stop_neutron - stop_ovs - rm -rf /var/log/openvswitch/* - mkdir -p /opt/opnfv/odl/ovs_back - mv /etc/openvswitch/conf.db /opt/opnfv/odl/ovs_back/. - mv /etc/openvswitch/.conf*lock* /opt/opnfv/odl/ovs_back/. - service openvswitch-switch start - ovs-vsctl set-manager tcp:172.30.9.70:6640 - ovs-vsctl add-br br-eth0 - ovs-vsctl add-br br-ex - ovs-vsctl add-port br-eth0 eth0 - ovs-vsctl add-port br-eth0 br-eth0--br-ex - ovs-vsctl add-port br-ex br-ex--br-eth0 - ovs-vsctl set interface br-ex--br-eth0 type=patch - ovs-vsctl set interface br-eth0--br-ex type=patch - ovs-vsctl set interface br-ex--br-eth0 options:peer=br-eth0--br-ex - ovs-vsctl set interface br-eth0--br-ex options:peer=br-ex--br-eth0 - ifconfig br-ex 172.30.9.70/24 up - service neutron-server restart - - echo "setting up networks" - ip link add link eth1 name br-mgmt type vlan id 300 - ip link add link eth1 name br-storage type vlan id 301 - /etc/init.d/networking restart - - - echo "Reset Neutron DB" - #reset_neutrondb - echo "Restarting Neutron Components" - #restart_neutron - echo "Verifying ODL ML2 plugin is working" - verify_ML2_working - -} - -function compute_setup { - echo "do compute stuff here" - echo "stopping neutron openvswitch plugin" - stop_neutron - ip link add link eth1 name br-mgmt type vlan id 300 - ifconfig br-mgmt `grep address /etc/network/interfaces.d/ifcfg-br-mgmt | awk -F" " '{print $2}'`/24 - ip link add link eth1 name br-storage type vlan id 301 - ifconfig br-storage `grep address /etc/network/interfaces.d/ifcfg-br-storage | awk -F" " '{print $2}'`/24 - ifconfig eth1 `grep address /etc/network/interfaces.d/ifcfg-br-fw-mgmt | awk -F" " '{print $2}'`/24 - echo "set manager, and route for ODL controller" - ovs-vsctl set-manager tcp:192.168.0.2:6640 - route add 172.17.0.1 gw 192.168.0.2 - verify_ML2_working -} - - -# MAIN -echo "Starting to make call" -update_ml2conf -echo "Check Mode" -set_mode - -if [ $MODE == "0" ]; -then - echo "Calling control setup" - control_setup -elif [ $MODE == "1" ]; -then - echo "Calling compute setup" - compute_setup - -else - echo "Something is bad - call for help" - exit -fi - - diff --git a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh deleted file mode 100644 index 3b688ae..0000000 --- a/fuel/build/f_lith_odl_docker/scripts/config_neutron_for_odl.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -CONTROL_HOST=172.17.0.3 - -# ENV -source ~/openrc - - - -# VARS -ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini -MODE=0 - - -# FUCNTIONS - - -# Update ml2_conf.ini -function update_ml2conf { - echo "Backing up and modifying ml2_conf.ini" - cp $ML2_CONF $ML2_CONF.bak - sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF -#!/bin/bash -CONTROL_HOST=172.17.0.3 - -# ENV -source ~/openrc - - - -# VARS -ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini -MODE=0 - - -# FUCNTIONS - - -# Update ml2_conf.ini -function update_ml2conf { - echo "Backing up and modifying ml2_conf.ini" - cp $ML2_CONF $ML2_CONF.bak - sed -i -e 's/mechanism_drivers =openvswitch/mechanism_drivers = opendaylight/g' $ML2_CONF - sed -i -e 's/tenant_network_types = flat,vlan,gre,vxlan/tenant_network_types = vxlan/g' $ML2_CONF - cat "[ml2_odl]" >> $ML2_CONF - cat "password = admin" >> $ML2_CONF - cat "username = admin" >> $ML2_CONF - cat "url = 
http://${CONTROL_HOST}:8080/controller/nb/v2/neutron" >> $ML2_CONF -} - -function reset_neutrondb { - echo "Reseting DB" - mysql -e "drop database if exists neutron_ml2;" - mysql -e "create database neutron_ml2 character set utf8;" - mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';" - neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head -} - -function restart_neutron { - echo "Restarting Neutron Server" - service neutron-server restart - echo "Should see Neutron runing now" - service neutron-server status - echo "Shouldnt be any nets, but should work (return empty)" - neutron net-list -} - -function stop_neutron { - echo "Stopping Neutron / OVS components" - service neutron-plugin-openvswitch-agent stop - if [ $MODE == "0" ] - then - service neutron-server stop - fi -} - - - -function verify_ML2_working { - echo "checking that we can talk via ML2 properly" - curl -u admin:admin http://${CONTROL_HOST}:8080/controller/nb/v2/neutron/networks > /tmp/check_ml2 - if grep "network" /tmp/check_ml2 - then - echo "Success - ML2 to ODL is working" - else - echo "im sorry Jim, but its dead" - fi - -} - - -function set_mode { - if df -k | grep glance - then - echo "Controller Mode" - MODE=0 - else - echo "Compute Mode" - MODE=1 - fi -} - -function stop_ovs { - echo "Stopping OpenVSwitch" - service openvswitch-switch stop - -} - -function control_setup { - echo "do control stuff here" - echo "Reset Neutron DB" - #reset_neutrondb - echo "Restarting Neutron Components" - #restart_neutron - echo "Verifying ODL ML2 plugin is working" - verify_ML2_working - -} - -function compute_setup { - echo "do compute stuff here" - stop_neutron - verify_ML2_working -} - - -# MAIN -echo "Starting to make call" -#update_ml2conf -echo "Check Mode" -set_mode - -if [ $MODE == "0" ]; -then - echo "Calling control setup" - control_setup -elif [ $MODE == "1" ]; -then - echo "Calling compute setup" - compute_setup - -else - echo "Something is bad - call for help" - exit -fi - - diff --git a/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh deleted file mode 100755 index dd4fc9f..0000000 --- a/fuel/build/f_lith_odl_docker/scripts/prep_nets_for_odl.sh +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -# a "cheat" way to install docker on the controller -# can only be used if you have a connecting out to the internet - -# Usage: ./install_docker.sh - -OLDGW=$1 -#!/bin/bash -# a "cheat" way to install docker on the controller -# can only be used if you have a connecting out to the internet - -# Usage: ./install_docker.sh - -OLDGW=$1 -NEWGW=$2 -IMAGEPATH=/opt/opnfv -IMAGENAME=odl_docker_image.tar -SOURCES=/etc/apt/sources.list - - -if [ "$#" -ne 2]; then - echo "Two args not provided, will not touch networking" -else - - # Fix routes - echo "Fixing routes" - #DEBUG - netstat -rn - - echo "delete old def route" - route delete default gw $1 - echo "adding new def route" - route add default gw $2 - - echo " you should see a good nslookup now" - nslookup www.google.ca -#!/bin/bash -# a "cheat" way to install docker on the controller -# can only be used if you have a connecting out to the internet - -# Usage: ./install_docker.sh - -OLDGW=$1 -NEWGW=$2 -IMAGEPATH=/opt/opnfv -IMAGENAME=odl_docker_image.tar -SOURCES=/etc/apt/sources.list - - -if [ "$#" -ne 2]; then - echo "Two args not provided, will not touch networking" -else - - # Fix routes - echo "Fixing routes" - #DEBUG - netstat -rn - - echo "delete old def route" 
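# The old gateway is taken on faith from $1; it could instead be discovered
# in place, e.g. (a sketch, not part of the original script):
#   OLDGW=$(ip route show default | awk '{print $3}')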
-  route delete default gw $1
-  echo "adding new def route"
-  route add default gw $2
-
-  echo " you should see a good nslookup now"
-  nslookup www.google.ca
-fi
-
-
-if egrep "mirrors.txt" $SOURCES
-then
-  echo "Sources was already updated, not touching"
-else
-  echo "adding the closests mirrors and docker mirror to the mix"
-  echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise main restricted universe multiverse" >> /etc/apt/sources.list
-  echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-updates main restricted universe multiverse" >> /etc/apt/sources.list
-  echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-backports main restricted universe multiverse" >> /etc/apt/sources.list
-  echo "deb mirror://mirrors.ubuntu.com/mirrors.txt precise-security main restricted universe multiverse" >> /etc/apt/sources.list
-  apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
-  echo "deb https://get.docker.com/ubuntu docker main " > /etc/apt/sources.list.d/docker.list
-fi
-
-echo "Updating"
-apt-get update
-echo "Installing Docker"
-apt-get install -y lxc-docker
-
-echo "Loading ODL Docker Image"
-docker load -i $IMAGEPATH/$IMAGENAME
-
-
diff --git a/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh b/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
deleted file mode 100644
index 42c9451..0000000
--- a/fuel/build/f_lith_odl_docker/scripts/setup_ovs_for_odl.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-
-
-ok .. so they created br-int
-
-so lets add a physical nic to it
-
-
-# First - Removal all the bridges you find
-
-for i in $(ovs-vsctl list-br)
-do
-        if [ "$i" == "br-int" ];
-        then
-                echo "skipped br-int"
-        elif [ "$i" == "br-prv"];
-        then
-                echo "skipped br-pr"
-        else
-                ovs-vsctl del-br $i
-        fi
-done
diff --git a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
index 436f496..54f1c86 100644
--- a/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
+++ b/fuel/build/f_opnfv_puppet/puppet/modules/opnfv/manifests/init.pp
@@ -25,6 +25,4 @@ class opnfv {
   include opnfv::add_packages
   # Setup OpenDaylight
   include opnfv::odl_docker
-  # Setup OpenDaylight
-  include opnfv::odl_lith_docker
 }
-- cgit 1.2.3-korg

From bff17c3ed2f7ba8a665a1ff2a0ba89ace4d3a5c9 Mon Sep 17 00:00:00 2001
From: arnaudmorin
Date: Fri, 29 May 2015 15:56:50 +0200
Subject: Add create vm script and foreman config

Add a script to create a KVM machine and install foreman in it.
This is the beginning of the OpenSteak installation mechanism.
It is very similar to what is done in the Foreman/Quickstack approach,
with small differences:
- Ubuntu as base OS (instead of CentOS)
- Foreman v1.8 (instead of 1.7.5)
- KVM (libvirt) virtual machine (instead of Vagrant)
- Only python/bash scripts to manage all the stuff (instead of
  Khaleesi/Ansible/Astapor playbooks)

Change-Id: Ie66b1da4288372927e30163f82f5a0f45e2e73d0
JIRA: BGS-9
Signed-off-by: arnaudmorin
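The user-data and meta-data templates added below feed cloud-init in the libvirt guest that create_foreman.py builds. The Python tooling drives libvirt through its virsh wrapper, but the provisioning step it automates corresponds roughly to the following shell sketch; the image paths, sizes and network name here are illustrative assumptions, not values taken from this patch:

# Pack the cloud-init user-data/meta-data into a NoCloud seed ISO
genisoimage -output /var/lib/libvirt/images/foreman-seed.iso \
  -volid cidata -joliet -rock user-data meta-data

# Boot an Ubuntu cloud image as the Foreman VM on the libvirt default network
virt-install --name foreman --ram 4096 --vcpus 2 \
  --disk path=/var/lib/libvirt/images/foreman.qcow2,format=qcow2 \
  --disk path=/var/lib/libvirt/images/foreman-seed.iso,device=cdrom \
  --network network=default --import --noautoconsole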
---
 opensteak/ci/build.sh                              |  19 ++
 opensteak/ci/deploy.sh                             |  28 +++
 opensteak/config/common.yaml                       | 119 +++++++++
 opensteak/config/infra.yaml                        |  81 +++++++
 opensteak/tools/README.rst                         |  52 +++++
 opensteak/tools/config.yaml                        |  78 +++++++
 opensteak/tools/create_foreman.py                  | 236 +++++++++++++++++++++
 opensteak/tools/files_foreman/id_rsa               |  27 +++
 opensteak/tools/files_foreman/id_rsa.pub           |   1 +
 opensteak/tools/opensteak/.gitignore               |  58 +++++
 opensteak/tools/opensteak/__init__.py              |  18 ++
 opensteak/tools/opensteak/argparser.py             |  46 ++++
 opensteak/tools/opensteak/conf.py                  |  72 +++++++
 opensteak/tools/opensteak/foreman.py               |  60 ++++++
 .../tools/opensteak/foreman_objects/.gitignore     |  58 +++++
 .../tools/opensteak/foreman_objects/__init__.py    |  18 ++
 opensteak/tools/opensteak/foreman_objects/api.py   | 197 +++++++++++++++++
 .../opensteak/foreman_objects/architectures.py     |  49 +++++
 .../opensteak/foreman_objects/compute_resources.py |  62 ++++++
 .../tools/opensteak/foreman_objects/domains.py     |  44 ++++
 .../tools/opensteak/foreman_objects/freeip.py      |  79 +++++++
 .../tools/opensteak/foreman_objects/hostgroups.py  | 103 +++++++++
 opensteak/tools/opensteak/foreman_objects/hosts.py | 142 +++++++++++++
 opensteak/tools/opensteak/foreman_objects/item.py  | 135 ++++++++++++
 .../tools/opensteak/foreman_objects/itemHost.py    | 141 ++++++++++++
 .../opensteak/foreman_objects/itemHostsGroup.py    |  50 +++++
 .../foreman_objects/itemOverrideValues.py          |  61 ++++++
 .../foreman_objects/itemSmartClassParameter.py     |  62 ++++++
 .../tools/opensteak/foreman_objects/objects.py     | 136 ++++++++++++
 .../opensteak/foreman_objects/operatingsystems.py  |  66 ++++++
 .../opensteak/foreman_objects/puppetClasses.py     |  46 ++++
 .../opensteak/foreman_objects/smart_proxies.py     |  36 ++++
 .../tools/opensteak/foreman_objects/subnets.py     |  67 ++++++
 opensteak/tools/opensteak/printer.py               | 141 ++++++++++++
 opensteak/tools/opensteak/templateparser.py        |  34 +++
 opensteak/tools/opensteak/virsh.py                 | 174 +++++++++++++++
 opensteak/tools/templates_foreman/install.sh       | 216 +++++++++++++++++++
 opensteak/tools/templates_foreman/kvm-config       |  65 ++++++
 opensteak/tools/templates_foreman/meta-data        |  12 ++
 opensteak/tools/templates_foreman/user-data        |  25 +++
 40 files changed, 3114 insertions(+)
 create mode 100644 opensteak/config/common.yaml
 create mode 100644 opensteak/config/infra.yaml
 create mode 100644 opensteak/tools/README.rst
 create mode 100644 opensteak/tools/config.yaml
 create mode 100644 opensteak/tools/create_foreman.py
 create mode 100644 opensteak/tools/files_foreman/id_rsa
 create mode 100644 opensteak/tools/files_foreman/id_rsa.pub
 create mode 100644 opensteak/tools/opensteak/.gitignore
 create mode 100644 opensteak/tools/opensteak/__init__.py
 create mode 100644 opensteak/tools/opensteak/argparser.py
 create mode 100644 opensteak/tools/opensteak/conf.py
 create mode 100644 opensteak/tools/opensteak/foreman.py
 create mode 100644 opensteak/tools/opensteak/foreman_objects/.gitignore
 create mode 100644 opensteak/tools/opensteak/foreman_objects/__init__.py
 create mode 100644 opensteak/tools/opensteak/foreman_objects/api.py
 create mode 100644
opensteak/tools/opensteak/foreman_objects/architectures.py create mode 100644 opensteak/tools/opensteak/foreman_objects/compute_resources.py create mode 100644 opensteak/tools/opensteak/foreman_objects/domains.py create mode 100644 opensteak/tools/opensteak/foreman_objects/freeip.py create mode 100644 opensteak/tools/opensteak/foreman_objects/hostgroups.py create mode 100644 opensteak/tools/opensteak/foreman_objects/hosts.py create mode 100644 opensteak/tools/opensteak/foreman_objects/item.py create mode 100644 opensteak/tools/opensteak/foreman_objects/itemHost.py create mode 100644 opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py create mode 100644 opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py create mode 100644 opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py create mode 100644 opensteak/tools/opensteak/foreman_objects/objects.py create mode 100644 opensteak/tools/opensteak/foreman_objects/operatingsystems.py create mode 100644 opensteak/tools/opensteak/foreman_objects/puppetClasses.py create mode 100644 opensteak/tools/opensteak/foreman_objects/smart_proxies.py create mode 100644 opensteak/tools/opensteak/foreman_objects/subnets.py create mode 100644 opensteak/tools/opensteak/printer.py create mode 100644 opensteak/tools/opensteak/templateparser.py create mode 100644 opensteak/tools/opensteak/virsh.py create mode 100644 opensteak/tools/templates_foreman/install.sh create mode 100644 opensteak/tools/templates_foreman/kvm-config create mode 100644 opensteak/tools/templates_foreman/meta-data create mode 100644 opensteak/tools/templates_foreman/user-data diff --git a/opensteak/ci/build.sh b/opensteak/ci/build.sh index e69de29..7a85332 100644 --- a/opensteak/ci/build.sh +++ b/opensteak/ci/build.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +#placeholder diff --git a/opensteak/ci/deploy.sh b/opensteak/ci/deploy.sh index e69de29..bd6ff86 100644 --- a/opensteak/ci/deploy.sh +++ b/opensteak/ci/deploy.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
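+#
+# At this stage the automated deployment reduces to bootstrapping the
+# Foreman VM: the commands below hand the infra description
+# (opensteak/config/infra.yaml) to create_foreman.py.
+#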
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +# TODO: find a way to create the openvswitch bridge + + +# This will create a Foreman Virtual Machine with KVM (libvirt) +cd ../tools/ +sudo python3 create_foreman.py --config ../config/infra.yaml + + diff --git a/opensteak/config/common.yaml b/opensteak/config/common.yaml new file mode 100644 index 0000000..144e84f --- /dev/null +++ b/opensteak/config/common.yaml @@ -0,0 +1,119 @@ +# common.yaml +--- + +### +## OpenStack passwords +### +ceph_password: "password" +admin_password: "password" +mysql_service_password: "password" +mysql_root_password: "password" +rabbitmq_password: "password" +glance_password: "password" +nova_password: "password" +neutron_shared_secret: "password" +neutron_password: "password" +cinder_password: "password" +keystone_admin_token: "password" +horizon_secret_key: "12345" + +domain: "infra.opensteak.fr" + +### +## Class parameters +### +# Rabbit +opensteak::rabbitmq::rabbitmq_password: "%{hiera('rabbitmq_password')}" + +# MySQL +opensteak::mysql::root_password: "%{hiera('mysql_root_password')}" +opensteak::mysql::mysql_password: "%{hiera('mysql_service_password')}" + +# Key +opensteak::key::password: "%{hiera('admin_password')}" +opensteak::key::stack_domain: "%{hiera('domain')}" + +# Keystone +opensteak::keystone::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::keystone::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::keystone::keystone_token: "%{hiera('keystone_admin_token')}" +opensteak::keystone::stack_domain: "%{hiera('domain')}" +opensteak::keystone::admin_mail: "admin@opensteak.fr" +opensteak::keystone::admin_password: "%{hiera('admin_password')}" +opensteak::keystone::glance_password: "%{hiera('glance_password')}" +opensteak::keystone::nova_password: "%{hiera('nova_password')}" +opensteak::keystone::neutron_password: "%{hiera('neutron_password')}" +opensteak::keystone::cinder_password: "%{hiera('cinder_password')}" + +# Glance +opensteak::glance::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::glance::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::glance::stack_domain: "%{hiera('domain')}" +opensteak::glance::glance_password: "%{hiera('glance_password')}" + +# Nova +opensteak::nova::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::nova::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::nova::stack_domain: "%{hiera('domain')}" +opensteak::nova::nova_password: "%{hiera('nova_password')}" +opensteak::nova::neutron_password: "%{hiera('neutron_password')}" +opensteak::nova::neutron_shared: "%{hiera('neutron_shared_secret')}" + +# Cinder +opensteak::cinder::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::cinder::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::cinder::stack_domain: "%{hiera('domain')}" +opensteak::cinder::nova_password: "%{hiera('cinder_password')}" + +# Compute +opensteak::nova-compute::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::nova-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::nova-compute::stack_domain: "%{hiera('domain')}" +opensteak::nova-compute::neutron_password: "%{hiera('neutron_password')}" + + +# Neutron controller +opensteak::neutron-controller::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::neutron-controller::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::neutron-controller::stack_domain: "%{hiera('domain')}" 
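+# NOTE: the "%{hiera('key')}" values are not resolved by the YAML parser;
+# they are Hiera interpolation tokens, evaluated by Puppet/Hiera at lookup
+# time, so each password or domain is defined once at the top of this file
+# and reused by every class parameter.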
+opensteak::neutron-controller::nova_password: "%{hiera('nova_password')}" +opensteak::neutron-controller::neutron_password: "%{hiera('neutron_password')}" +# Neutron compute +opensteak::neutron-compute::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::neutron-compute::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::neutron-compute::stack_domain: "%{hiera('domain')}" +opensteak::neutron-compute::neutron_password: "%{hiera('neutron_password')}" +opensteak::neutron-compute::neutron_shared: "%{hiera('neutron_shared_secret')}" +opensteak::neutron-compute::infra_nodes: + server186: + ip: 192.168.1.27 + bridge_uplinks: + - 'br-vm:p3p1' + server187: + ip: 192.168.1.155 + bridge_uplinks: + - 'br-vm:p3p1' + server188: + ip: 192.168.1.116 + bridge_uplinks: + - 'br-vm:p3p1' + server189: + ip: 192.168.1.117 + bridge_uplinks: + - 'br-vm:p3p1' +# Neutron network +opensteak::neutron-network::mysql_password: "%{hiera('mysql_root_password')}" +opensteak::neutron-network::rabbitmq_password: "%{hiera('rabbitmq_password')}" +opensteak::neutron-network::stack_domain: "%{hiera('domain')}" +opensteak::neutron-network::neutron_password: "%{hiera('neutron_password')}" +opensteak::neutron-network::neutron_shared: "%{hiera('neutron_shared_secret')}" +opensteak::neutron-network::infra_nodes: + server98: + ip: 192.168.1.58 + bridge_uplinks: + - 'br-ex:em2' + - 'br-vm:em5' + +# Horizon +opensteak::horizon::stack_domain: "%{hiera('domain')}" +opensteak::horizon::secret_key: "%{hiera('horizon_secret_key')}" diff --git a/opensteak/config/infra.yaml b/opensteak/config/infra.yaml new file mode 100644 index 0000000..2ff02a1 --- /dev/null +++ b/opensteak/config/infra.yaml @@ -0,0 +1,81 @@ +domains: "infra.opensteak.fr" +media: "Ubuntu mirror" +environments: "production" +operatingsystems: "Ubuntu14.04Cloud" +subnets: "Admin" +compute_profiles: "Test" +smart_proxies: "foreman.infra.opensteak.fr" +ptables: "Preseed default" +architectures: "x86_64" + +operatingsystems: + "Ubuntu 14.04.2 LTS": + name: "Ubuntu" + description: "Ubuntu 14.04.2 LTS" + major: "14" + minor: "04" + family: "Debian" + release_name: "trusty" + password_hash: "MD5" + "Ubuntu 14.04 Cloud": + name: "Ubuntu14.04Cloud" + description: "Ubuntu 14.04 Cloud" + major: "14" + minor: "04" + family: "Debian" + release_name: "trusty" + password_hash: "MD5" + +hostgroupTop: + name: 'test' + classes: + - "ntp" + subnet: "Admin" + params: + password: 'toto' +hostgroups: + hostgroupController: + name: 'controller' + classes: + - "opensteak::base-network" + - "opensteak::libvirt" + params: + foreman_sshkey: 'xxxx' + hostgroupControllerVM: + name: 'controller_VM' + classes: + - "opensteak::apt" + params: + foreman_sshkey: 'xxxx' + password: 'toto' + hostgroupCompute: + name: 'compute' + classes: + - "opensteak::neutron-compute" + - "opensteak::nova-compute" +subnets: + Admin: + shared: False + data: + network: "192.168.4.0" + mask: "255.255.255.0" + vlanid: + gateway: "192.168.4.1" + dns_primary: "192.168.1.4" + from: "192.168.4.10" + to: "192.168.4.200" + ipam: "DHCP" + boot_mode: "DHCP" + +foreman: + ip: "192.168.4.2" + admin: "admin" + password: "opnfv" + cpu: "4" + ram: "4194304" + iso: "trusty-server-cloudimg-amd64-disk1.img" + disksize: "5G" + force: True + dns: "8.8.8.8" + bridge: "br-libvirt" + bridge_type: "openvswitch" diff --git a/opensteak/tools/README.rst b/opensteak/tools/README.rst new file mode 100644 index 0000000..188addc --- /dev/null +++ b/opensteak/tools/README.rst @@ -0,0 +1,52 @@ +:Authors: Arnaud Morin 
(arnaud1.morin@orange.com)
+:Version: 0.0.2
+
+=======================================================
+OPNFV Installation instructions using Foreman/OpenSteak
+=======================================================
+
+Abstract
+========
+
+This document describes how to set up OPNFV from a foreman Virtual Machine on an Ubuntu server.
+
+License
+=======
+OPNFV Installation instructions using Foreman/OpenSteak (c) by Arnaud Morin (Orange)
+
+OPNFV Installation instructions using Foreman/OpenSteak are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+
+Version history
+===================
+
++--------------------+--------------------+--------------------+--------------------+
+| **Date**           | **Ver.**           | **Author**         | **Comment**        |
+|                    |                    |                    |                    |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-06-08         | 0.0.1              | Arnaud Morin       | First draft        |
+|                    |                    | (Orange)           |                    |
++--------------------+--------------------+--------------------+--------------------+
+
+Table of contents
+===================
+
+.. contents::
+   :backlinks: none
+
+Introduction
+============
+
+This document describes how to set up OPNFV from a foreman Virtual Machine on an Ubuntu server.
+Before starting, you should have an Ubuntu 14.04 LTS server already installed.
+
+Here is the manual workflow that you will have to perform:
+
+- Install
+- Manually prepare configuration files from templates.
+
+
+Here is the current workflow of the automated installation:
+
+- Dependencies installation (such as libvirt, ipmitools, etc.)
+- Foreman Virtual Machine creation
+- to be completed
diff --git a/opensteak/tools/config.yaml b/opensteak/tools/config.yaml
new file mode 100644
index 0000000..c618a52
--- /dev/null
+++ b/opensteak/tools/config.yaml
@@ -0,0 +1,78 @@
+domains: "test-infra.opensteak.fr"
+media: "Ubuntu mirror"
+environments: "production"
+operatingsystems: "Ubuntu14.04Cloud"
+subnets: "Admin"
+compute_profiles: "Test"
+smart_proxies: "foreman.infra.opensteak.fr"
+ptables: "Preseed default"
+architectures: "x86_64"
+
+operatingsystems:
+  "Ubuntu 14.04.1 LTS":
+    name: "Ubuntu"
+    description: "Ubuntu 14.04.1 LTS"
+    major: "14"
+    minor: "04"
+    family: "Debian"
+    release_name: "trusty"
+    password_hash: "MD5"
+  "Ubuntu 14.04 Cloud":
+    name: "Ubuntu14.04Cloud"
+    description: "Ubuntu 14.04 Cloud"
+    major: "14"
+    minor: "04"
+    family: "Debian"
+    release_name: "trusty"
+    password_hash: "MD5"
+
+hostgroupTop:
+  name: 'test'
+  classes:
+    - "ntp"
+  subnet: "Admin"
+  params:
+    password: 'toto'
+hostgroups:
+  hostgroupController:
+    name: 'controller'
+    classes:
+      - "opensteak::base-network"
+      - "opensteak::libvirt"
+    params:
+      foreman_sshkey: 'xxxx'
+  hostgroupControllerVM:
+    name: 'controller_VM'
+    classes:
+      - "opensteak::apt"
+    params:
+      foreman_sshkey: 'xxxx'
+      password: 'toto'
+  hostgroupCompute:
+    name: 'compute'
+    classes:
+      - "opensteak::neutron-compute"
+      - "opensteak::nova-compute"
+subnets:
+  Admin:
+    shared: False
+    data:
+      network: "172.16.0.0"
+      mask: "255.255.255.0"
+      vlanid:
+      gateway: "172.16.0.1"
+      dns_primary: "172.16.0.1"
+      from: "172.16.0.10"
+      to: "172.16.0.200"
+      ipam: "DHCP"
+      boot_mode: "DHCP"
+
+foreman:
+  ip: "172.16.0.2"
+  password: "opnfv"
+  cpu: "2"
+  ram: "2097152"
+  iso: "trusty-server-cloudimg-amd64-disk1.img"
+  disksize: "5G"
+  force: True
+  dns: "8.8.8.8"
diff --git a/opensteak/tools/create_foreman.py b/opensteak/tools/create_foreman.py
new file mode 100644
index 0000000..6cf4510
--- /dev/null
+++ b/opensteak/tools/create_foreman.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+"""
+Create Virtual Machines
+"""
+
+# TODO: be sure that we are running as root
+
+from opensteak.conf import OpenSteakConfig
+from opensteak.printer import OpenSteakPrinter
+# from opensteak.argparser import OpenSteakArgParser
+from opensteak.templateparser import OpenSteakTemplateParser
+from opensteak.virsh import OpenSteakVirsh
+from pprint import pprint as pp
+# from ipaddress import IPv4Address
+import argparse
+import tempfile
+import shutil
+import os
+# import sys
+
+p = OpenSteakPrinter()
+
+#
+# Check for params
+#
+p.header("Check parameters")
+args = {}
+
+# Update args with values from CLI
+parser = argparse.ArgumentParser(description='This script will create a foreman VM.', usage='%(prog)s [options]')
+parser.add_argument('-c', '--config', help='YAML config file to use (default is config/infra.yaml).', default='config/infra.yaml')
+args.update(vars(parser.parse_args()))
+
+# Open config file
+conf = OpenSteakConfig(config_file=args["config"])
+# pp(conf.dump())
+
+a = {}
+a["name"] = "foreman"
+a["ip"] = conf["foreman"]["ip"]
+a["netmask"] = conf["subnets"]["Admin"]["data"]["mask"]
+# Derive the CIDR prefix length from the dotted-quad netmask by counting
+# the bits set in each octet (e.g. 255.255.255.0 -> 24)
+a["netmaskshort"] = sum([bin(int(x)).count('1')
+                         for x in conf["subnets"]["Admin"]
+                         ["data"]["mask"]
+                         .split('.')])
+a["gateway"] = conf["subnets"]["Admin"]["data"]["gateway"]
+a["network"] = conf["subnets"]["Admin"]["data"]["network"]
+a["admin"] = conf["foreman"]["admin"]
+a["password"] = conf["foreman"]["password"]
+a["cpu"] = conf["foreman"]["cpu"]
+a["ram"] = conf["foreman"]["ram"]
+a["iso"] = conf["foreman"]["iso"]
+a["disksize"] = conf["foreman"]["disksize"]
+a["force"] = conf["foreman"]["force"]
+a["dhcprange"] = "{0} {1}".format(conf["subnets"]["Admin"]
+                                  ["data"]["from"],
+                                  conf["subnets"]["Admin"]
+                                  ["data"]["to"])
+a["domain"] = conf["domains"]
+# Build the reverse DNS zone name (e.g. 192.168.4.2 -> 4.168.192.in-addr.arpa)
+reverse_octets = str(conf["foreman"]["ip"]).split('.')[-2::-1]
+a["reversedns"] = '.'.join(reverse_octets) + '.in-addr.arpa'
+a["dns"] = conf["foreman"]["dns"]
+a["bridge"] = conf["foreman"]["bridge"]
+if conf["foreman"]["bridge_type"] == "openvswitch":
+    # extra libvirt XML so the VM NIC attaches through openvswitch
+    a["bridgeconfig"] = "<virtualport type='openvswitch'/>"
+else:
+    # no specific config for linuxbridge
+    a["bridgeconfig"] = ""
+
+# Update args with values from config file
+args.update(a)
+del a
+
+p.list_id(args)
+
+# Ask confirmation
+if args["force"] is not True:
+    p.ask_validation()
+
+# Create the VM
+p.header("Initiate configuration")
+
+###
+# Work on templates
+###
+# Create temporary folders and files
+tempFolder = tempfile.mkdtemp(dir="/tmp")
+tempFiles = {}
+
+for f in os.listdir("templates_foreman/"):
+    tempFiles[f] = "{0}/{1}".format(tempFolder, f)
+    try:
+        OpenSteakTemplateParser("templates_foreman/{0}".format(f),
+                                tempFiles[f], args)
+    except Exception as err:
+        p.status(False, msg=("Something went wrong when trying to create "
+                             "the file {0} from the
template " + "templates_foreman/{1}").format(tempFiles[f], f), + failed="{0}".format(err)) + +### +# Work on files +### +for f in os.listdir("files_foreman/"): + tempFiles[f] = "{0}/{1}".format(tempFolder, f) + shutil.copyfile("files_foreman/{0}".format(f), tempFiles[f]) + +p.status(True, msg="Temporary files created:") +p.list_id(tempFiles) + + +### +# Delete if already exists +### + +# Get all volumes and VM +p.header("Virsh calls") +OpenSteakVirsh = OpenSteakVirsh() +volumeList = OpenSteakVirsh.volumeList() +domainList = OpenSteakVirsh.domainList() +# p.list_id(volumeList) +# p.list_id(domainList) + +# TODO: check that the default image is in the list +# (trusty-server-cloudimg-amd64-disk1.img by default) + +# Delete the volume if exists +try: + oldVolume = volumeList[args["name"]] + + # Ask confirmation + if args["force"] is not True: + p.ask_validation() + + status = OpenSteakVirsh.volumeDelete(volumeList[args["name"]]) + if (status["stderr"]): + p.status(False, msg=status["stderr"]) + p.status(True, msg=status["stdout"]) +except KeyError as err: + # no old volume, do nothing + pass + +# Delete the VM if exists +try: + vmStatus = domainList[args["name"]] + + # Ask confirmation + if args["force"] is not True: + p.ask_validation() + + # Destroy (stop) + if vmStatus == "running": + status = OpenSteakVirsh.domainDestroy(args["name"]) + if (status["stderr"]): + p.status(False, msg=status["stderr"]) + p.status(True, msg=status["stdout"]) + + # Undefine (delete) + status = OpenSteakVirsh.domainUndefine(args["name"]) + if (status["stderr"]): + p.status(False, msg=status["stderr"]) + p.status(True, msg=status["stdout"]) +except KeyError as err: + # no old VM defined, do nothing + pass + +### +# Create the configuration image file from metadata and userdata +### +status = OpenSteakVirsh.generateConfiguration(args["name"], tempFiles) +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=("Configuration generated successfully in " + "/var/lib/libvirt/images/{0}-configuration.iso") + .format(args["name"])) + +# Refresh the pool +status = OpenSteakVirsh.poolRefresh() +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=status["stdout"]) + +### +# Create the new VM +### +# Create the volume from a clone +status = OpenSteakVirsh.volumeClone(args["iso"], args["name"]) +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=status["stdout"]) + +# Resize the volume +status = OpenSteakVirsh.volumeResize(args["name"], args["disksize"]) +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=status["stdout"]) + +# Create the VM +status = OpenSteakVirsh.domainDefine(tempFiles["kvm-config"]) +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=status["stdout"]) + + +### +# Start the VM +### +status = OpenSteakVirsh.domainStart(args["name"]) +if (status["stderr"]): + p.status(False, msg=status["stderr"]) +p.status(True, msg=status["stdout"]) + +p.status(True, msg="Log file is at: /var/log/libvirt/qemu/{0}-serial.log" + .format(args["name"])) + +p.header("fini") + +# Delete temporary dir +shutil.rmtree(tempFolder) diff --git a/opensteak/tools/files_foreman/id_rsa b/opensteak/tools/files_foreman/id_rsa new file mode 100644 index 0000000..d53ba88 --- /dev/null +++ b/opensteak/tools/files_foreman/id_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAz0jMplucYXoe0xJ21ASL98PGbwZYCI5Xr4/kHXOdGvHvZr3z 
+58tWU1Ta4qMf0qa272VsdQiO1pCmSlqrDW5C9rEeqLhhRX/yLbgv35mOdjRoIIAX +6RfNniT/xXrfvPZYdw603fIbbw5igTRwc6W5QvJHRcKRKb762Vw2gPSS0GgFBLCk +vC2kQbW4cfP+9elo86FAhNBs2TbBHLc9H2W+9KzYfgsigjJLsgRXL6/uhu3+sL2d +3F1J9Nhyy3aoUOVxD2YPJlJvzYhLZcSXgXI+Oi0gZmhh3uImc4WRyOihK5jRpJaw +desygyXo4lVskzxBjm7L9ynbCNMOO85ZVVJGxQIDAQABAoIBAQCaOWcSy4yRtiPj +FZTV8MAXS1GD36t2SjoRhLTL+O5GUwW1YtVrfA2xmKv2/jm6KJJpkgPdG83y9NLU +9ZrZNlWaaHQQQocVB7ovrB/qdLzbU+i5bbTcl/pDlPG8g8yeMoflpUqK7AzfV0uR +KGwWj5JErjC7RaVt8wt+164xykbFyZeUu9htNthFD/OPaIPqgv6AoJdEULyGrTbd +SRyJ01n0beGkB0o+0dnOEO34K+pU0Zzk+rAcOEl3UNkpxOzedEFOR6NdnX1eH4t4 +a6OZgskcVjyxFQPAyhcSkQ2iWncQx2ritTclst4NFjBae5hwYgEB4S9ZN5IOueMH +eYhxYthNAoGBAPXtSDmRGPc4EHDBrbgDn4vhxK7QN35bWFW1KvHLD0hBBJO57GqT +jGCJsbkw6peERuFV8qq+Bvz0nvlKl9humB1djlndUETksUTrNz73XxpJJ8L5parF +okx0QLMXONOP5b6yGWYay3QD0gNz/HYVf//oDTdWRhbq5EY6VarOagfjAoGBANfG +UrlxEYHwq3TE7unvgaao5Vpmw8Hqir2bnl2zKmPoV8ds/V+paMnV6Hhzgzu3bKgF +ukZgAizEcfvxrxnfIraRJTI5xgBoIl8gdbsWkLre4qKpVSAkw4JLyzVVlXCyKYHp +ocjeNVbO5Z2Yft0cv30LfeX+DEDeQS12RHLu/Sc3AoGBAMns2ZfC5p/encknje8A +spjVeHwdJOOQNxiwl6FPHK40DIELcO4VVnbRuGaZnpVoHBbbTlQZkX1TkdCZCdLB +BA9giQiKamUW7eLry0HdNW5M0OQLvZZZjih+b71c/ODhTz/j1mz65UDN/jutmYaP +orjJnUhpg0U/+s0bCsojj/YHAoGBAKtsMhiFjaUv8OdJ9Y0A7H3dPKk/b1JF5YeR +dJV4W7sXwXT8T6eKTWfce14GV0JADSDHvB9g8xlh0DSa48OoFEn6shRe9cEo+fWd +Mis6WC0+Gcukv65TxsdjM8PhhGIOCQ/e7ttIPhQDN0Sm/FLqHe9YC+OGm3GFoT5e +8S5mU9StAoGABFwqkFELU84twzKYJCVPZPktwtfrD0Hkbd9pk0ebuSnQ3bATFIyU +CDspTADbY2IgC53u+XAhTd5BOsicTtMM9x1p5EOglbK1ANagWuGlzVfdbp+bmql9 +S8AaH22lha5vCfHHfAN2NSkQ+ABZnNpP66nFx06VcyEYkhuZgd6s5A0= +-----END RSA PRIVATE KEY----- diff --git a/opensteak/tools/files_foreman/id_rsa.pub b/opensteak/tools/files_foreman/id_rsa.pub new file mode 100644 index 0000000..8b4c6a1 --- /dev/null +++ b/opensteak/tools/files_foreman/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPSMymW5xheh7TEnbUBIv3w8ZvBlgIjlevj+Qdc50a8e9mvfPny1ZTVNriox/SprbvZWx1CI7WkKZKWqsNbkL2sR6ouGFFf/ItuC/fmY52NGgggBfpF82eJP/Fet+89lh3DrTd8htvDmKBNHBzpblC8kdFwpEpvvrZXDaA9JLQaAUEsKS8LaRBtbhx8/716WjzoUCE0GzZNsEctz0fZb70rNh+CyKCMkuyBFcvr+6G7f6wvZ3cXUn02HLLdqhQ5XEPZg8mUm/NiEtlxJeBcj46LSBmaGHe4iZzhZHI6KErmNGklrB16zKDJejiVWyTPEGObsv3KdsI0w47zllVUkbF arnaud@l-bibicy diff --git a/opensteak/tools/opensteak/.gitignore b/opensteak/tools/opensteak/.gitignore new file mode 100644 index 0000000..a65d046 --- /dev/null +++ b/opensteak/tools/opensteak/.gitignore @@ -0,0 +1,58 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ diff --git a/opensteak/tools/opensteak/__init__.py b/opensteak/tools/opensteak/__init__.py new file mode 100644 index 0000000..01f9c9a --- /dev/null +++ b/opensteak/tools/opensteak/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +# This directory is a Python package. diff --git a/opensteak/tools/opensteak/argparser.py b/opensteak/tools/opensteak/argparser.py new file mode 100644 index 0000000..de980b6 --- /dev/null +++ b/opensteak/tools/opensteak/argparser.py @@ -0,0 +1,46 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin +# @author: Pawel Chomicki + +""" +Parse arguments from CLI +""" + +import argparse + +class OpenSteakArgParser: + + def __init__(self): + """ + Parse the command line + """ + self.parser = argparse.ArgumentParser(description='This script will create config files for a VM in current folder.', usage='%(prog)s [options] name') + self.parser.add_argument('name', help='Set the name of the machine') + self.parser.add_argument('-i', '--ip', help='Set the ip address of the machine. (Default is 192.168.42.42)', default='192.168.42.42') + self.parser.add_argument('-n', '--netmask', help='Set the netmask in short format. (Default is 24)', default='24') + self.parser.add_argument('-g', '--gateway', help='Set the gateway to ping internet. (Default is 192.168.42.1)', default='192.168.42.1') + self.parser.add_argument('-p', '--password', help='Set the ssh password. Login is ubuntu. (Default password is moutarde)', default='moutarde') + self.parser.add_argument('-u', '--cpu', help='Set number of CPU for the VM. (Default is 2)', default='2') + self.parser.add_argument('-r', '--ram', help='Set quantity of RAM for the VM in kB. (Default is 2097152)', default='2097152') + self.parser.add_argument('-o', '--iso', help='Use this iso file. (Default is trusty-server-cloudimg-amd64-disk1.img)', default='trusty-server-cloudimg-amd64-disk1.img') + self.parser.add_argument('-d', '--disksize', help='Create a disk with that size. 
(Default is 5G)', default='5G')
+        self.parser.add_argument('-f', '--force', help='Force creation without asking questions. This is dangerous as it will delete old VM with same name.', default=False, action='store_true')
+
+    def parse(self):
+        return self.parser.parse_args()
+
diff --git a/opensteak/tools/opensteak/conf.py b/opensteak/tools/opensteak/conf.py
new file mode 100644
index 0000000..65eaf43
--- /dev/null
+++ b/opensteak/tools/opensteak/conf.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+from yaml import load, dump
+try:
+    from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+    from yaml import Loader, Dumper
+
+
+class OpenSteakConfig:
+    """OpenSteak config class
+    Use this object as a dict
+    """
+
+    def __init__(self,
+                 config_file="/usr/local/opensteak/infra/config/common.yaml",
+                 autosave=False):
+        """ Function __init__
+        Load saved opensteak config.
+
+        @param config_file: the yaml config file to read.
+            default is '/usr/local/opensteak/infra/config/common.yaml'
+        @param autosave: automatically save the config on object destruction
+            default is False
+        """
+        self.config_file = config_file
+        self.autosave = autosave
+        with open(self.config_file, 'r') as stream:
+            self._data = load(stream, Loader=Loader)
+
+    def __getitem__(self, index):
+        """Get an item of the configuration"""
+        return self._data[index]
+
+    def __setitem__(self, index, value):
+        """Set an item of the configuration"""
+        self._data[index] = value
+
+    def list(self):
+        """List the keys of the configuration"""
+        return self._data.keys()
+
+    def dump(self):
+        """Dump the configuration"""
+        return dump(self._data, Dumper=Dumper)
+
+    def save(self):
+        """Save the configuration to the file"""
+        with open(self.config_file, 'w') as f:
+            f.write(dump(self._data, Dumper=Dumper))
+
+    def __del__(self):
+        if self.autosave:
+            self.save()
diff --git a/opensteak/tools/opensteak/foreman.py b/opensteak/tools/opensteak/foreman.py
new file mode 100644
index 0000000..b7cbf42
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman.py
@@ -0,0 +1,60 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
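+#
+# A minimal usage sketch for the OpenSteakConfig class defined in conf.py
+# above (assumes config/infra.yaml exists relative to the working
+# directory):
+#
+#   from opensteak.conf import OpenSteakConfig
+#   conf = OpenSteakConfig(config_file='config/infra.yaml')
+#   print(conf['foreman']['ip'])         # dict-style read
+#   conf['foreman']['ram'] = '8388608'   # in-memory update
+#   conf.save()                          # write back to the YAML file
+#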
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.api import Api +from opensteak.foreman_objects.objects import ForemanObjects +from opensteak.foreman_objects.domains import Domains +from opensteak.foreman_objects.smart_proxies import SmartProxies +from opensteak.foreman_objects.operatingsystems import OperatingSystems +from opensteak.foreman_objects.hostgroups import HostGroups +from opensteak.foreman_objects.hosts import Hosts +from opensteak.foreman_objects.architectures import Architectures +from opensteak.foreman_objects.subnets import Subnets +from opensteak.foreman_objects.puppetClasses import PuppetClasses +from opensteak.foreman_objects.compute_resources import ComputeResources + + +class OpenSteakForeman: + """ + HostGroup class + """ + def __init__(self, password, login='admin', ip='127.0.0.1'): + """ Function __init__ + Init the API with the connection params + @param password: authentication password + @param password: authentication login - default is admin + @param ip: api ip - default is localhost + @return RETURN: self + """ + self.api = Api(login=login, password=password, ip=ip, + printErrors=False) + self.domains = Domains(self.api) + self.smartProxies = SmartProxies(self.api) + self.puppetClasses = PuppetClasses(self.api) + self.operatingSystems = OperatingSystems(self.api) + self.architectures = Architectures(self.api) + self.subnets = Subnets(self.api) + self.hostgroups = HostGroups(self.api) + self.hosts = Hosts(self.api) + self.computeResources = ComputeResources(self.api) + self.environments = ForemanObjects(self.api, + 'environments', + 'environment') + self.smartClassParameters = ForemanObjects(self.api, + 'smart_class_parameters', + 'smart_class_parameter') diff --git a/opensteak/tools/opensteak/foreman_objects/.gitignore b/opensteak/tools/opensteak/foreman_objects/.gitignore new file mode 100644 index 0000000..a65d046 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/.gitignore @@ -0,0 +1,58 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ diff --git a/opensteak/tools/opensteak/foreman_objects/__init__.py b/opensteak/tools/opensteak/foreman_objects/__init__.py new file mode 100644 index 0000000..01f9c9a --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +# This directory is a Python package. diff --git a/opensteak/tools/opensteak/foreman_objects/api.py b/opensteak/tools/opensteak/foreman_objects/api.py new file mode 100644 index 0000000..dc99734 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/api.py @@ -0,0 +1,197 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +import json +import requests +from requests_futures.sessions import FuturesSession +from pprint import pformat + + +class Api: + """ + Api class + Class to deal with the foreman API v2 + """ + def __init__(self, password, login='admin', ip='127.0.0.1', printErrors=False): + """ Function __init__ + Init the API with the connection params + + @param password: authentication password + @param password: authentication login - default is admin + @param ip: api ip - default is localhost + @return RETURN: self + """ + self.base_url = 'http://{}/api/v2/'.format(ip) + self.headers = {'Accept': 'version=2', + 'Content-Type': 'application/json; charset=UTF-8'} + self.auth = (login, password) + self.errorMsg = '' + self.printErrors = printErrors + + def list(self, obj, filter=False, only_id=False, limit=20): + """ Function list + Get the list of an object + + @param obj: object name ('hosts', 'puppetclasses'...) + @param filter: filter for objects + @param only_id: boolean to only return dict with name/id + @return RETURN: the list of the object + """ + self.url = '{}{}/?per_page={}'.format(self.base_url, obj, limit) + if filter: + self.url += '&search={}'.format(filter) + self.resp = requests.get(url=self.url, auth=self.auth, + headers=self.headers) + if only_id: + if self.__process_resp__(obj) is False: + return False + if type(self.res['results']) is list: + return dict((x['name'], x['id']) for x in self.res['results']) + elif type(self.res['results']) is dict: + r = {} + for v in self.res['results'].values(): + for vv in v: + r[vv['name']] = vv['id'] + return r + else: + return False + else: + return self.__process_resp__(obj) + + def get(self, obj, id, sub_object=None): + """ Function get + Get an object by id + + @param obj: object name ('hosts', 'puppetclasses'...) + @param id: the id of the object (name or id) + @return RETURN: the targeted object + """ + self.url = '{}{}/{}'.format(self.base_url, obj, id) + if sub_object: + self.url += '/' + sub_object + self.resp = requests.get(url=self.url, auth=self.auth, + headers=self.headers) + if self.__process_resp__(obj): + return self.res + return False + + def get_id_by_name(self, obj, name): + """ Function get_id_by_name + Get the id of an object + + @param obj: object name ('hosts', 'puppetclasses'...) 
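+            These are the plural endpoint names of the Foreman v2 API.
+            Illustrative call: get_id_by_name('domains', 'infra.opensteak.fr')
+            resolves the name to its numeric id, or returns False when
+            nothing matches.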
+ @param id: the id of the object (name or id) + @return RETURN: the targeted object + """ + list = self.list(obj, filter='name = "{}"'.format(name), + only_id=True, limit=1) + return list[name] if name in list.keys() else False + + def set(self, obj, id, payload, action='', async=False): + """ Function set + Set an object by id + + @param obj: object name ('hosts', 'puppetclasses'...) + @param id: the id of the object (name or id) + @param action: specific action of an object ('power'...) + @param payload: the dict of the payload + @param async: should this request be async, if true use + return.result() to get the response + @return RETURN: the server response + """ + self.url = '{}{}/{}'.format(self.base_url, obj, id) + if action: + self.url += '/{}'.format(action) + self.payload = json.dumps(payload) + if async: + session = FuturesSession() + return session.put(url=self.url, auth=self.auth, + headers=self.headers, data=self.payload) + else: + self.resp = requests.put(url=self.url, auth=self.auth, + headers=self.headers, data=self.payload) + if self.__process_resp__(obj): + return self.res + return False + + def create(self, obj, payload, async=False): + """ Function create + Create an new object + + @param obj: object name ('hosts', 'puppetclasses'...) + @param payload: the dict of the payload + @param async: should this request be async, if true use + return.result() to get the response + @return RETURN: the server response + """ + self.url = self.base_url + obj + self.payload = json.dumps(payload) + if async: + session = FuturesSession() + return session.post(url=self.url, auth=self.auth, + headers=self.headers, data=self.payload) + else: + self.resp = requests.post(url=self.url, auth=self.auth, + headers=self.headers, + data=self.payload) + return self.__process_resp__(obj) + + def delete(self, obj, id): + """ Function delete + Delete an object by id + + @param obj: object name ('hosts', 'puppetclasses'...) + @param id: the id of the object (name or id) + @return RETURN: the server response + """ + self.url = '{}{}/{}'.format(self.base_url, obj, id) + self.resp = requests.delete(url=self.url, + auth=self.auth, + headers=self.headers, ) + return self.__process_resp__(obj) + + def __process_resp__(self, obj): + """ Function __process_resp__ + Process the response sent by the server and store the result + + @param obj: object name ('hosts', 'puppetclasses'...) 
+ @return RETURN: the server response + """ + self.last_obj = obj + if self.resp.status_code > 299: + self.errorMsg = ">> Error {} for object '{}'".format(self.resp.status_code, + self.last_obj) + try: + self.ret = json.loads(self.resp.text) + self.errorMsg += pformat(self.ret[list(self.ret.keys())[0]]) + except: + self.ret = self.resp.text + self.errorMsg += self.ret + if self.printErrors: + print(self.errorMsg) + return False + self.res = json.loads(self.resp.text) + if 'results' in self.res.keys(): + return self.res['results'] + return self.res + + def __str__(self): + ret = pformat(self.base_url) + "\n" + ret += pformat(self.headers) + "\n" + ret += pformat(self.auth) + "\n" + return ret diff --git a/opensteak/tools/opensteak/foreman_objects/architectures.py b/opensteak/tools/opensteak/foreman_objects/architectures.py new file mode 100644 index 0000000..5e4303e --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/architectures.py @@ -0,0 +1,49 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects + + +class Architectures(ForemanObjects): + """ + Architectures class + """ + objName = 'architectures' + payloadObj = 'architecture' + + def checkAndCreate(self, key, payload, osIds): + """ Function checkAndCreate + Check if an architectures exists and create it if not + + @param key: The targeted architectures + @param payload: The targeted architectures description + @param osIds: The list of os ids liked with this architecture + @return RETURN: The id of the object + """ + if key not in self: + self[key] = payload + oid = self[key]['id'] + if not oid: + return False + #~ To be sure the OS list is good, we ensure our os are in the list + for os in self[key]['operatingsystems']: + osIds.add(os['id']) + self[key]["operatingsystem_ids"] = list(osIds) + if (len(self[key]['operatingsystems']) is not len(osIds)): + return False + return oid diff --git a/opensteak/tools/opensteak/foreman_objects/compute_resources.py b/opensteak/tools/opensteak/foreman_objects/compute_resources.py new file mode 100644 index 0000000..9ada9c4 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/compute_resources.py @@ -0,0 +1,62 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
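+#
+# A minimal sketch of driving the Api class from api.py above directly;
+# login, password and ip mirror config/infra.yaml, and the output shown is
+# illustrative, not guaranteed:
+#
+#   from opensteak.foreman_objects.api import Api
+#   api = Api(login='admin', password='opnfv', ip='192.168.4.2')
+#   print(api.list('domains', only_id=True))  # e.g. {'infra.opensteak.fr': 1}
+#   api.create('domains', {'domain': {'name': 'lab.opensteak.fr'}})  # name is hypothetical
+#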
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ComputeResources(ForemanObjects):
+    """
+    ComputeResources class
+    """
+    objName = 'compute_resources'
+    payloadObj = 'compute_resource'
+
+    def list(self):
+        """ Function list
+        List the compute resources
+
+        @return RETURN: List of ForemanItem objects
+        """
+        return list(map(lambda x: ForemanItem(self.api, x['id'], x),
+                        self.api.list(self.objName)))
+
+    def __getitem__(self, key):
+        """ Function __getitem__
+        Get a compute resource
+
+        @param key: The compute resource name or ID
+        @return RETURN: The ForemanItem object of a compute resource
+        """
+        # Get by name is not supported here, so resolve the name to an id
+        if type(key) is not int:
+            key = self.getId(key)
+        ret = self.api.get(self.objName, key)
+        return ForemanItem(self.api, key, ret)
+
+    def __delitem__(self, key):
+        """ Function __delitem__
+        Delete a compute resource
+
+        @param key: The compute resource name or ID
+        @return RETURN: The API result
+        """
+        # Delete by name is not supported here, so resolve the name to an id
+        if type(key) is not int:
+            key = self.getId(key)
+        return self.api.delete(self.objName, key)
diff --git a/opensteak/tools/opensteak/foreman_objects/domains.py b/opensteak/tools/opensteak/foreman_objects/domains.py
new file mode 100644
index 0000000..753833f
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/domains.py
@@ -0,0 +1,44 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+from opensteak.foreman_objects.objects import ForemanObjects
+
+
+class Domains(ForemanObjects):
+    """
+    Domain class
+    """
+    objName = 'domains'
+    payloadObj = 'domain'
+
+    def load(self, id='0', name=''):
+        """ Function load
+        To be rewritten
+
+        @param id: The Domain ID
+        @return RETURN: DESCRIPTION
+        """
+
+        if name:
+            id = self.__getIdByName__(name)
+        self.data = self.foreman.get('domains', id)
+        if 'parameters' in self.data:
+            self.params = self.data['parameters']
+        else:
+            self.params = []
+        self.name = self.data['name']
diff --git a/opensteak/tools/opensteak/foreman_objects/freeip.py b/opensteak/tools/opensteak/foreman_objects/freeip.py
new file mode 100644
index 0000000..86c003f
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/freeip.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+#~ from foreman.api import Api
+import requests
+from bs4 import BeautifulSoup
+import sys
+import json
+
+
+class FreeIP:
+    """ FreeIP returns an available IP in the targeted network """
+
+    def __init__(self, login, password):
+        """ Init: get authenticity token """
+        with requests.session() as self.session:
+            try:
+                #~ 1/ Get login token and authenticate
+                payload = {}
+                log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/users/login', verify=False).text)
+                payload['utf8'] = log_soup.findAll('input', attrs={'name': 'utf8'})[0].get('value')
+                payload['authenticity_token'] = log_soup.findAll('input', attrs={'name': 'authenticity_token'})[0].get('value')
+                if payload['authenticity_token'] is None:
+                    raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+                payload['commit'] = 'Login'
+                payload['login[login]'] = login
+                payload['login[password]'] = password
+                #~ 2/ Log in
+                r = self.session.post('https://127.0.0.1/users/login', verify=False, data=payload)
+                if r.status_code != 200:
+                    raise requests.exceptions.RequestException("Bad login or password")
+                #~ Get token for host creation
+                log_soup = BeautifulSoup(self.session.get('https://127.0.0.1/hosts/new', verify=False).text)
+                self.authenticity_token = log_soup.findAll('input', attrs={'name': 'authenticity_token'})[0].get('value')
+                # make sure the creation token was actually found
+                if self.authenticity_token is None:
+                    raise requests.exceptions.RequestException("Bad catch of authenticity_token")
+            except requests.exceptions.RequestException as e:
+                print("Error connecting to Foreman to get a free IP")
+                print(e)
+                sys.exit(1)
+
+    def get(self, subnet, mac=""):
+        payload = {"host_mac": mac, "subnet_id": subnet}
+        payload['authenticity_token'] = self.authenticity_token
+        try:
+            self.last_ip = json.loads(self.session.post('https://127.0.0.1/subnets/freeip', verify=False, data=payload).text)['ip']
+            # validate the response rather than the request token
+            if self.last_ip is None:
+                raise requests.exceptions.RequestException("Error getting free IP")
+        except requests.exceptions.RequestException as e:
+            print("Error connecting to Foreman to get a free IP")
+            print(e)
+            sys.exit(1)
+        return self.last_ip
+
+
+if __name__ == "__main__":
+    import pprint
+    import sys
+    if len(sys.argv) == 4:
+        f = FreeIP(sys.argv[1], sys.argv[2])
+        print(f.get(sys.argv[3]))
+    else:
+        print('Error: Usage\npython {} foreman_user foreman_password subnet'.format(sys.argv[0]))
diff --git a/opensteak/tools/opensteak/foreman_objects/hostgroups.py b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
new file mode 100644
index 0000000..55b8ba6
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/hostgroups.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
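+#
+# Note on freeip.py above: the "next free IP" helper is not exposed through
+# the JSON API used elsewhere in this package, so FreeIP authenticates
+# against the web UI with requests/BeautifulSoup and posts the same form a
+# browser would (it must run on the Foreman host itself, as the URLs are
+# hard-coded to 127.0.0.1). Sketch of the intended use, subnet id 3 being a
+# placeholder:
+#
+#   from opensteak.foreman_objects.freeip import FreeIP
+#   f = FreeIP('admin', 'opnfv')
+#   print(f.get(3))   # e.g. '192.168.4.10'
+#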
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects +from opensteak.foreman_objects.itemHostsGroup import ItemHostsGroup +from pprint import pprint as pp + + +class HostGroups(ForemanObjects): + """ + HostGroups class + """ + objName = 'hostgroups' + payloadObj = 'hostgroup' + + def list(self): + """ Function list + list the hostgroups + + @return RETURN: List of ItemHostsGroup objects + """ + return list(map(lambda x: ItemHostsGroup(self.api, x['id'], x), + self.api.list(self.objName))) + + def __getitem__(self, key): + """ Function __getitem__ + Get an hostgroup + + @param key: The hostgroup name or ID + @return RETURN: The ItemHostsGroup object of an host + """ + # Because Hostgroup did not support get by name we need to do it by id + if type(key) is not int: + key = self.getId(key) + ret = self.api.get(self.objName, key) + return ItemHostsGroup(self.api, key, ret) + + def __delitem__(self, key): + """ Function __delitem__ + Delete an hostgroup + + @param key: The hostgroup name or ID + @return RETURN: The API result + """ + # Because Hostgroup did not support get by name we need to do it by id + if type(key) is not int: + key = self.getId(key) + return self.api.delete(self.objName, key) + + def checkAndCreate(self, key, payload, + hostgroupConf, + hostgroupParent, + puppetClassesId): + """ Function checkAndCreate + check And Create procedure for an hostgroup + - check the hostgroup is not existing + - create the hostgroup + - Add puppet classes from puppetClassesId + - Add params from hostgroupConf + + @param key: The hostgroup name or ID + @param payload: The description of the hostgroup + @param hostgroupConf: The configuration of the host group from the + foreman.conf + @param hostgroupParent: The id of the parent hostgroup + @param puppetClassesId: The dict of puppet classes ids in foreman + @return RETURN: The ItemHostsGroup object of an host + """ + if key not in self: + self[key] = payload + oid = self[key]['id'] + if not oid: + return False + + # Create Hostgroup classes + hostgroupClassIds = self[key]['puppetclass_ids'] + if 'classes' in hostgroupConf.keys(): + if not self[key].checkAndCreateClasses(puppetClassesId.values()): + print("Failed in classes") + return False + + # Set params + if 'params' in hostgroupConf.keys(): + if not self[key].checkAndCreateParams(hostgroupConf['params']): + print("Failed in params") + return False + + return oid diff --git a/opensteak/tools/opensteak/foreman_objects/hosts.py b/opensteak/tools/opensteak/foreman_objects/hosts.py new file mode 100644 index 0000000..95d47af --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/hosts.py @@ -0,0 +1,142 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
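+#
+# The checkAndCreate() flow in hostgroups.py above is idempotent: the
+# collection behaves like a dict backed by the API, so "key not in self"
+# probes Foreman and "self[key] = payload" only creates what is missing.
+# A sketch of a call (payload shape, parent and class id are assumptions
+# for illustration):
+#
+#   from opensteak.foreman import OpenSteakForeman
+#   foreman = OpenSteakForeman(password='opnfv', ip='192.168.4.2')
+#   oid = foreman.hostgroups.checkAndCreate(
+#       'controller',                          # hostgroup name
+#       {'name': 'controller'},                # creation payload
+#       {'classes': ['opensteak::libvirt'],    # hostgroupConf, as in infra.yaml
+#        'params': {'foreman_sshkey': 'xxxx'}},
+#       None,                                  # hostgroupParent
+#       {'opensteak::libvirt': 42})            # puppetClassesId (id assumed)
+#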
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+from opensteak.foreman_objects.objects import ForemanObjects
+from opensteak.foreman_objects.itemHost import ItemHost
+import time
+
+
+class Hosts(ForemanObjects):
+    """
+    Hosts class
+    """
+    objName = 'hosts'
+    payloadObj = 'host'
+
+    def list(self):
+        """ Function list
+        List the hosts
+
+        @return RETURN: List of ItemHost objects
+        """
+        return list(map(lambda x: ItemHost(self.api, x['id'], x),
+                        self.api.list(self.objName)))
+
+    def __getitem__(self, key):
+        """ Function __getitem__
+        Get a host
+
+        @param key: The host name or ID
+        @return RETURN: The ItemHost object of a host
+        """
+        return ItemHost(self.api, key, self.api.get(self.objName, key))
+
+    def __printProgression__(self, status, msg, eol='\n', **kwargs):
+        """ Function __printProgression__
+        Print the creation progression or not
+        It uses the foreman.printer lib
+
+        @param status: Status of the message
+        @param msg: Message
+        @param eol: End Of Line (to get a new line or not)
+        @return RETURN: None
+        """
+        # Forward to the printer object handed to createVM() (assumed to be
+        # an OpenSteakPrinter); recursing into this method here would never
+        # return.
+        if self.printHostProgress:
+            self.printHostProgress.status(status, msg, eol=eol, **kwargs)
+
+    def createVM(self, key, attributes, printHostProgress=False):
+        """ Function createVM
+        Create a Virtual Machine
+
+        The creation of a VM with libvirt is a bit complex.
+        We first create the host entry in Foreman, then ask the VM to power
+        on before the creation call has returned.
+        To do so, we make async calls to the API and check the results
+
+        @param key: The host name or ID
+        @param attributes: The payload of the host creation
+        @param printHostProgress: The link to opensteak.printer lib
+                                  to print or not the
+                                  progression of the host creation
+        @return RETURN: True on success, False otherwise
+        """
+
+        self.printHostProgress = printHostProgress
+        self.async = True
+        # Create the VM in foreman
+        self.__printProgression__('In progress',
+                                  key + ' creation: push in Foreman', eol='\r')
+        future1 = self.api.create('hosts', attributes, async=True)
+
+        # Wait before asking to power on the VM
+        sleep = 5
+        for i in range(0, sleep):
+            time.sleep(1)
+            self.__printProgression__('In progress',
+                                      key + ' creation: start in {0}s'
+                                      .format(sleep - i),
+                                      eol='\r')
+
+        # Power on the VM
+        self.__printProgression__('In progress',
+                                  key + ' creation: starting', eol='\r')
+        future2 = self[key].powerOn()
+
+        # Show Power on result
+        if future2.result().status_code == 200:
+            self.__printProgression__('In progress',
+                                      key + ' creation: wait for end of boot',
+                                      eol='\r')
+        else:
+            self.__printProgression__(False,
+                                      key + ' creation: Error',
+                                      failed=str(future2.result().status_code))
+            return False
+        # Show creation result
+        if future1.result().status_code == 200:
+            self.__printProgression__('In progress',
+                                      key + ' creation: created',
+                                      eol='\r')
+        else:
+            self.__printProgression__(False,
+                                      key + ' creation: Error',
+                                      failed=str(future1.result().status_code))
+            return False
+
+        # Wait for puppet catalog to be applied
+        loop_stop = False
+        while not loop_stop:
+            status = self[key].getStatus()
+            if status == 'No Changes' or status == 'Active':
+                self.__printProgression__(True,
+                                          key + ' creation: provisioning OK')
+                loop_stop = True
+            elif status == 'Error':
+                self.__printProgression__(False,
+                                          key + ' creation: Error',
+                                          failed="Error during provisioning")
+                loop_stop = True
+                return False
+            else:
+                self.__printProgression__('In progress',
+                                          key + ' creation: provisioning ({})'
+                                          .format(status),
+                                          eol='\r')
+                time.sleep(5)
+
+        return True
diff --git a/opensteak/tools/opensteak/foreman_objects/item.py
b/opensteak/tools/opensteak/foreman_objects/item.py new file mode 100644 index 0000000..f418f8c --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/item.py @@ -0,0 +1,135 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# David Blaisonneau +# Arnaud Morin + +from pprint import pprint as pp + + +class ForemanItem(dict): + """ + Item class + Represent the content of a foreman object as a dict + """ + + def __init__(self, api, key, + objName, payloadObj, + *args, **kwargs): + """ Function __init__ + Represent the content of a foreman object as a dict + + @param api: The foreman api + @param key: The object Key + @param *args, **kwargs: the dict representation + @return RETURN: Itself + """ + self.api = api + self.key = key + if objName: + self.objName = objName + if payloadObj: + self.payloadObj = payloadObj + self.store = dict() + if args[0]: + self.update(dict(*args, **kwargs)) + # We get the smart class parameters for the good items + if objName in ['hosts', 'hostgroups', + 'puppet_classes', 'environments']: + from opensteak.foreman_objects.itemSmartClassParameter\ + import ItemSmartClassParameter + scp_ids = map(lambda x: x['id'], + self.api.list('{}/{}/smart_class_parameters' + .format(self.objName, key))) + scp_items = list(map(lambda x: ItemSmartClassParameter(self.api, x, + self.api.get('smart_class_parameters', x)), + scp_ids)) + scp = {'{}::{}'.format(x['puppetclass']['name'], + x['parameter']): x + for x in scp_items} + self.update({'smart_class_parameters_dict': scp}) + + def __setitem__(self, key, attributes): + """ Function __setitem__ + Set a parameter of a foreman object as a dict + + @param key: The key to modify + @param attribute: The data + @return RETURN: The API result + """ + if key is 'puppetclass_ids': + payload = {"puppetclass_id": attributes, + self.payloadObj + "_class": + {"puppetclass_id": attributes}} + return self.api.create("{}/{}/{}" + .format(self.objName, + self.key, + "puppetclass_ids"), + payload) + elif key is 'parameters': + payload = {"parameter": attributes} + return self.api.create("{}/{}/{}" + .format(self.objName, + self.key, + "parameters"), + payload) + else: + payload = {self.payloadObj: {key: attributes}} + return self.api.set(self.objName, self.key, payload) + + def getParam(self, name=None): + """ Function getParam + Return a dict of parameters or a parameter value + + @param key: The parameter name + @return RETURN: dict of parameters or a parameter value + """ + if 'parameters' in self.keys(): + l = {x['name']: x['value'] for x in self['parameters']} + if name: + if name in l.keys(): + return l[name] + else: + return False + else: + return l + + def checkAndCreateClasses(self, classes): + """ Function checkAndCreateClasses + Check and add puppet classe + + @param key: The parameter name + @param classes: The classes ids list + @return RETURN: boolean + """ + actual_classes = self['puppetclass_ids'] + for v in classes: + if v not in actual_classes: + self['puppetclass_ids'] = 
v
+        # list.sort() sorts in place and returns None, so the previous
+        # 'list(...).sort() is list(...).sort()' comparison was always True
+        return sorted(classes) == sorted(self['puppetclass_ids'])
+
+    def checkAndCreateParams(self, params):
+        """ Function checkAndCreateParams
+        Check and add global parameters
+
+        @param key: The parameter name
+        @param params: The params dict
+        @return RETURN: boolean
+        """
+        actual_params = self['param_ids']
+        for k, v in params.items():
+            if k not in actual_params:
+                self['parameters'] = {"name": k, "value": v}
+        # Compare parameter names (param_ids holds names); sorted() is
+        # needed because list.sort() returns None
+        return sorted(self['param_ids']) == sorted(params.keys())
diff --git a/opensteak/tools/opensteak/foreman_objects/itemHost.py b/opensteak/tools/opensteak/foreman_objects/itemHost.py
new file mode 100644
index 0000000..c531e5c
--- /dev/null
+++ b/opensteak/tools/opensteak/foreman_objects/itemHost.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Authors:
+# @author: David Blaisonneau
+# @author: Arnaud Morin
+
+import base64
+from string import Template
+from opensteak.foreman_objects.item import ForemanItem
+
+
+class ItemHost(ForemanItem):
+    """
+    ItemHost class
+    Represent the content of a foreman host as a dict
+    """
+
+    objName = 'hosts'
+    payloadObj = 'host'
+
+    def __init__(self, api, key, *args, **kwargs):
+        """ Function __init__
+        Represent the content of a foreman object as a dict
+
+        @param api: The foreman api
+        @param key: The object Key
+        @param *args, **kwargs: the dict representation
+        @return RETURN: Itself
+        """
+        ForemanItem.__init__(self, api, key,
+                             self.objName, self.payloadObj,
+                             *args, **kwargs)
+        self.update({'puppetclass_ids':
+                     self.api.list('{}/{}/puppetclass_ids'
+                                   .format(self.objName, key))})
+        self.update({'param_ids':
+                     list(self.api.list('{}/{}/parameters'
+                                        .format(self.objName, key),
+                                        only_id=True)
+                          .keys())})
+
+    def getStatus(self):
+        """ Function getStatus
+        Get the status of a host
+
+        @return RETURN: The host status
+        """
+        return self.api.get('hosts', self.key, 'status')['status']
+
+    def powerOn(self):
+        """ Function powerOn
+        Power on a host
+
+        @return RETURN: The API result
+        """
+        # Items do not always carry the async flag, so default to False
+        return self.api.set('hosts', self.key,
+                            {"power_action": "start"},
+                            'power', async=getattr(self, 'async', False))
+
+    def getParamFromEnv(self, var, default=''):
+        """ Function getParamFromEnv
+        Search a parameter in the host environment
+
+        @param var: the var name
+        @param hostgroup: the hostgroup item linked to this host
+        @param default: default value
+        @return RETURN: the value
+        """
+        if self.getParam(var):
+            return self.getParam(var)
+        if self.hostgroup:
+            if self.hostgroup.getParam(var):
+                return self.hostgroup.getParam(var)
+        # Fall back to the domain-level value of the same parameter
+        # (was hard-coded to 'password')
+        if self.domain.getParam(var):
+            return self.domain.getParam(var)
+        else:
+            return default
+
+    def getUserData(self,
+                    hostgroup,
+                    domain,
+                    defaultPwd='',
+                    defaultSshKey='',
+                    proxyHostname='',
+                    tplFolder='templates_metadata/'):
+        """ Function getUserData
+        Generate a userdata script for metadata server from Foreman API
+
+        @param domain: the domain item linked to this host
+        @param hostgroup: the hostgroup item linked to this host
+
@param defaultPwd: the default password if no password is specified + in the host>hostgroup>domain params + @param defaultSshKey: the default ssh key if no password is specified + in the host>hostgroup>domain params + @param proxyHostname: hostname of the smartproxy + @param tplFolder: the templates folder + @return RETURN: the user data + """ + if 'user-data' in self.keys(): + return self['user-data'] + else: + self.hostgroup = hostgroup + self.domain = domain + if proxyHostname == '': + proxyHostname = 'foreman.' + domain + password = self.getParamFromEnv('password', defaultPwd) + sshauthkeys = self.getParamFromEnv('global_sshkey', defaultSshKey) + with open(tplFolder+'puppet.conf', 'rb') as puppet_file: + p = MyTemplate(puppet_file.read()) + enc_puppet_file = base64.b64encode(p.substitute( + foremanHostname=proxyHostname)) + with open(tplFolder+'cloud-init.tpl', 'r') as content_file: + s = MyTemplate(content_file.read()) + if sshauthkeys: + sshauthkeys = ' - '+sshauthkeys + self.userdata = s.substitute( + password=password, + fqdn=self['name'], + sshauthkeys=sshauthkeys, + foremanurlbuilt="http://{}/unattended/built" + .format(proxyHostname), + puppet_conf_content=enc_puppet_file.decode('utf-8')) + return self.userdata + + +class MyTemplate(Template): + delimiter = '%' + idpattern = r'[a-z][_a-z0-9]*' diff --git a/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py new file mode 100644 index 0000000..d6a641c --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/itemHostsGroup.py @@ -0,0 +1,50 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
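The MyTemplate subclass above switches string.Template to a '%' delimiter, so the cloud-init and puppet.conf templates can keep literal '$' for shell variables. A small standalone illustration (the fragment and values are made up):

    from string import Template

    class MyTemplate(Template):
        delimiter = '%'
        idpattern = r'[a-z][_a-z0-9]*'

    # Hypothetical cloud-init fragment using the '%' delimiter
    tpl = MyTemplate('hostname: %fqdn\npassword: %password\n')
    print(tpl.substitute(fqdn='vm01.example.com', password='secret'))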
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.item import ForemanItem + + +class ItemHostsGroup(ForemanItem): + """ + ItemHostsGroup class + Represent the content of a foreman hostgroup as a dict + """ + + objName = 'hostgroups' + payloadObj = 'hostgroup' + + def __init__(self, api, key, *args, **kwargs): + """ Function __init__ + Represent the content of a foreman object as a dict + + @param api: The foreman api + @param key: The object Key + @param *args, **kwargs: the dict representation + @return RETURN: Itself + """ + ForemanItem.__init__(self, api, key, + self.objName, self.payloadObj, + *args, **kwargs) + self.update({'puppetclass_ids': + self.api.list('{}/{}/puppetclass_ids' + .format(self.objName, key))}) + self.update({'param_ids': + list(self.api.list('{}/{}/parameters' + .format(self.objName, key), + only_id=True) + .keys())}) diff --git a/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py new file mode 100644 index 0000000..936185e --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/itemOverrideValues.py @@ -0,0 +1,61 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
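Because ItemHostsGroup eagerly loads its puppet class ids and parameter names at construction time, reading them afterwards is a plain dict lookup. A sketch, again assuming an existing `api` wrapper and a group named 'compute':

    from opensteak.foreman_objects.hostgroups import HostGroups

    group = HostGroups(api)['compute']   # resolves the name to an id first
    print(group['puppetclass_ids'])      # fetched once, at construction
    print(group['param_ids'])            # parameter names, one API call at init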
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + + +from opensteak.foreman_objects.item import ForemanItem +from pprint import pprint as pp + +class ItemOverrideValues(ForemanItem): + """ + ItemOverrideValues class + Represent the content of a foreman smart class parameter as a dict + """ + + objName = 'override_values' + payloadObj = 'override_value' + + def __init__(self, api, key, parentName, parentKey, *args, **kwargs): + """ Function __init__ + Represent the content of a foreman object as a dict + + @param api: The foreman api + @param key: The object Key + @param parentName: The object parent name (eg: smart_class_parameter) + @param parentKey: The object parent key + @param *args, **kwargs: the dict representation + @return RETURN: Itself + """ + self.parentName = parentName + self.parentKey = parentKey + ForemanItem.__init__(self, api, key, + self.objName, self.payloadObj, + *args, **kwargs) + + def __setitem__(self, key, attributes): + """ Function __setitem__ + Set a parameter of a foreman object as a dict + + @param key: The key to modify + @param attribute: The data + @return RETURN: The API result + """ + payload = {self.payloadObj: {key: attributes}} + return self.api.set('{}/{}/{}'.format(self.parentName, + self.parentKey, + self.objName), + self.key, payload) diff --git a/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py new file mode 100644 index 0000000..2d7ca2a --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/itemSmartClassParameter.py @@ -0,0 +1,62 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
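Writing through an ItemOverrideValues instance issues an update against the nested route smart_class_parameters/<id>/override_values/<id>, as built in __setitem__ above. A sketch with invented ids; the nested api.get route shown here is an assumption about the API wrapper, not something this patch defines:

    from opensteak.foreman_objects.itemOverrideValues import ItemOverrideValues

    # Override value 3 of smart class parameter 12 (both ids hypothetical)
    ov = ItemOverrideValues(api, 3, 'smart_class_parameters', 12,
                            api.get('smart_class_parameters/12/override_values',
                                    3))
    ov['value'] = 'eth1'   # one API call per assignment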
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + + +from opensteak.foreman_objects.item import ForemanItem +from opensteak.foreman_objects.itemOverrideValues import ItemOverrideValues + + +class ItemSmartClassParameter(ForemanItem): + """ + ItemSmartClassParameter class + Represent the content of a foreman smart class parameter as a dict + """ + + objName = 'smart_class_parameters' + payloadObj = 'smart_class_parameter' + + def __init__(self, api, key, *args, **kwargs): + """ Function __init__ + Represent the content of a foreman object as a dict + + @param api: The foreman api + @param key: The object Key + @param *args, **kwargs: the dict representation + @return RETURN: Itself + """ + ForemanItem.__init__(self, api, key, + self.objName, self.payloadObj, + *args, **kwargs) + self.update({'override_values': + list(map(lambda x: ItemOverrideValues(self.api, + x['id'], + self.objName, + key, + x), + self['override_values']))}) + + def __setitem__(self, key, attributes): + """ Function __setitem__ + Set a parameter of a foreman object as a dict + + @param key: The key to modify + @param attribute: The data + @return RETURN: The API result + """ + payload = {self.payloadObj: {key: attributes}} + return self.api.set(self.objName, self.key, payload) diff --git a/opensteak/tools/opensteak/foreman_objects/objects.py b/opensteak/tools/opensteak/foreman_objects/objects.py new file mode 100644 index 0000000..c20c5a1 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/objects.py @@ -0,0 +1,136 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
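Smart class parameters are usually flipped to "override" and given a default before any override value takes effect; with the item class above that is two dict assignments, each mapping to one API call (the parameter id and values are illustrative):

    from opensteak.foreman_objects.itemSmartClassParameter \
        import ItemSmartClassParameter

    scp = ItemSmartClassParameter(api, 42,
                                  api.get('smart_class_parameters', 42))
    scp['override'] = True          # PUT smart_class_parameters/42
    scp['default_value'] = 'br-ex'  # each assignment is one API call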
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.item import ForemanItem + + +class ForemanObjects: + """ + ForemanObjects class + Parent class for Foreman Objects + """ + + def __init__(self, api, objName=None, payloadObj=None): + """ Function __init__ + Init the foreman object + + @param api: The foreman API + @param objName: The object name (linked with the Foreman API) + @param payloadObj: The object name inside the payload (in general + the singular of objName) + @return RETURN: Itself + """ + + self.api = api + if objName: + self.objName = objName + if payloadObj: + self.payloadObj = payloadObj + # For asynchronous creations + self.async = False + + def __iter__(self): + """ Function __iter__ + + @return RETURN: The iteration of objects list + """ + return iter(self.list()) + + def __getitem__(self, key): + """ Function __getitem__ + + @param key: The targeted object + @return RETURN: A ForemanItem + """ + return ForemanItem(self.api, + key, + self.objName, + self.payloadObj, + self.api.get(self.objName, key)) + + def __setitem__(self, key, attributes): + """ Function __setitem__ + + @param key: The targeted object + @param attributes: The attributes to apply to the object + @return RETURN: API result if the object was not present, or False + """ + if key not in self: + payload = {self.payloadObj: {'name': key}} + payload[self.payloadObj].update(attributes) + return self.api.create(self.objName, payload, async=self.async) + return False + + def __delitem__(self, key): + """ Function __delitem__ + + @return RETURN: API result + """ + return self.api.delete(self.objName, key) + + def __contains__(self, key): + """ Function __contains__ + + @param key: The targeted object + @return RETURN: True if the object exists + """ + return bool(key in self.listName().keys()) + + def getId(self, key): + """ Function getId + Get the id of an object + + @param key: The targeted object + @return RETURN: The ID + """ + return self.api.get_id_by_name(self.objName, key) + + def list(self, limit=20): + """ Function list + Get the list of all objects + + @param key: The targeted object + @param limit: The limit of items to return + @return RETURN: A ForemanItem list + """ + return list(map(lambda x: + ForemanItem(self.api, x['id'], + self.objName, self.payloadObj, + x), + self.api.list(self.objName, limit=limit))) + + def listName(self): + """ Function listName + Get the list of all objects name with Ids + + @param key: The targeted object + @return RETURN: A dict of obejct name:id + """ + return self.api.list(self.objName, limit=999999, only_id=True) + + def checkAndCreate(self, key, payload): + """ Function checkAndCreate + Check if an object exists and create it if not + + @param key: The targeted object + @param payload: The targeted object description + @return RETURN: The id of the object + """ + if key not in self: + self[key] = payload + return self[key]['id'] diff --git a/opensteak/tools/opensteak/foreman_objects/operatingsystems.py b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py new file mode 100644 index 0000000..8cce606 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/operatingsystems.py @@ -0,0 +1,66 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects +from opensteak.foreman_objects.item import ForemanItem + + +class OperatingSystems(ForemanObjects): + """ + OperatingSystems class + """ + objName = 'operatingsystems' + payloadObj = 'operatingsystem' + + def __getitem__(self, key): + """ Function __getitem__ + + @param key: The operating system id/name + @return RETURN: The item + """ + ret = self.api.list(self.objName, + filter='title = "{}"'.format(key)) + if len(ret): + return ForemanItem(self.api, key, + self.objName, self.payloadObj, + ret[0]) + else: + return None + + def __setitem__(self, key, attributes): + """ Function __getitem__ + + @param key: The operating system id/name + @param attributes: The content of the operating system to create + @return RETURN: The API result + """ + if key not in self: + payload = {self.payloadObj: {'title': key}} + payload[self.payloadObj].update(attributes) + return self.api.create(self.objName, payload) + return False + + def listName(self): + """ Function listName + Get the list of all objects name with Ids + + @param key: The targeted object + @return RETURN: A dict of obejct name:id + """ + return { x['title']: x['id'] for x in self.api.list(self.objName, + limit=999999)} diff --git a/opensteak/tools/opensteak/foreman_objects/puppetClasses.py b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py new file mode 100644 index 0000000..7f397f2 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/puppetClasses.py @@ -0,0 +1,46 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
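Operating systems are looked up by title rather than name, which is why OperatingSystems overrides __getitem__ with a filter and returns None on a miss. A sketch (the title string and attributes are assumptions):

    from opensteak.foreman_objects.operatingsystems import OperatingSystems

    oses = OperatingSystems(api)
    ubuntu = oses['Ubuntu 14.04 LTS']        # filter: title = "..."
    if ubuntu is None:
        oses['Ubuntu 14.04 LTS'] = {'major': '14', 'minor': '04',
                                    'family': 'Debian'}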
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects +from opensteak.foreman_objects.item import ForemanItem +from pprint import pprint as pp + + +class PuppetClasses(ForemanObjects): + """ + OperatingSystems class + """ + objName = 'puppetclasses' + payloadObj = 'puppetclass' + + def list(self, limit=20): + """ Function list + Get the list of all objects + + @param key: The targeted object + @param limit: The limit of items to return + @return RETURN: A ForemanItem list + """ + puppetClassList = list() + for v in self.api.list(self.objName, limit=limit).values(): + puppetClassList.extend(v) + return list(map(lambda x: + ForemanItem(self.api, x['id'], + self.objName, self.payloadObj, + x), + puppetClassList)) diff --git a/opensteak/tools/opensteak/foreman_objects/smart_proxies.py b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py new file mode 100644 index 0000000..2d6518b --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/smart_proxies.py @@ -0,0 +1,36 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects + + +class SmartProxies(ForemanObjects): + """ + Domain class + """ + objName = 'smart_proxies' + payloadObj = 'smart_proxy' + + def importPuppetClasses(self, smartProxyId): + """ Function importPuppetClasses + Force the reload of puppet classes + + @param smartProxyId: smartProxy Id + @return RETURN: the API result + """ + return self.api.create('smart_proxies/{}/import_puppetclasses'.format(smartProxyId), '{}') diff --git a/opensteak/tools/opensteak/foreman_objects/subnets.py b/opensteak/tools/opensteak/foreman_objects/subnets.py new file mode 100644 index 0000000..b1cac54 --- /dev/null +++ b/opensteak/tools/opensteak/foreman_objects/subnets.py @@ -0,0 +1,67 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
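After pushing new Puppet modules, the usual sequence is to ask the smart proxy to re-import its puppet classes and then list what Foreman now knows about. A minimal sketch, assuming proxy id 1:

    from opensteak.foreman_objects.puppetClasses import PuppetClasses
    from opensteak.foreman_objects.smart_proxies import SmartProxies

    SmartProxies(api).importPuppetClasses(1)   # reload classes on proxy 1
    for pc in PuppetClasses(api).list(limit=100):
        print(pc['name'])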
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +from opensteak.foreman_objects.objects import ForemanObjects + + +class Subnets(ForemanObjects): + """ + Subnets class + """ + objName = 'subnets' + payloadObj = 'subnet' + + def checkAndCreate(self, key, payload, domainId): + """ Function checkAndCreate + Check if a subnet exists and create it if not + + @param key: The targeted subnet + @param payload: The targeted subnet description + @param domainId: The domainId to be attached wiuth the subnet + @return RETURN: The id of the subnet + """ + if key not in self: + self[key] = payload + oid = self[key]['id'] + if not oid: + return False + #~ Ensure subnet contains the domain + subnetDomainIds = [] + for domain in self[key]['domains']: + subnetDomainIds.append(domain['id']) + if domainId not in subnetDomainIds: + subnetDomainIds.append(domainId) + self[key]["domain_ids"] = subnetDomainIds + if len(self[key]["domains"]) is not len(subnetDomainIds): + return False + return oid + + def removeDomain(self, subnetId, domainId): + """ Function removeDomain + Delete a domain from a subnet + + @param subnetId: The subnet Id + @param domainId: The domainId to be attached wiuth the subnet + @return RETURN: boolean + """ + subnetDomainIds = [] + for domain in self[subnetId]['domains']: + subnetDomainIds.append(domain['id']) + subnetDomainIds.remove(domainId) + self[subnetId]["domain_ids"] = subnetDomainIds + return len(self[subnetId]["domains"]) is len(subnetDomainIds) diff --git a/opensteak/tools/opensteak/printer.py b/opensteak/tools/opensteak/printer.py new file mode 100644 index 0000000..98c5af5 --- /dev/null +++ b/opensteak/tools/opensteak/printer.py @@ -0,0 +1,141 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +import sys + + +class OpenSteakPrinter: + """ Just a nice message printer """ + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + TABSIZE = 4 + + def header(self, msg): + """ Function header + Print a header for a block + + @param msg: The message to print in the header (limited to 78 chars) + @return RETURN: None + """ + print(""" +# +# {} +# +""".format(msg[0:78])) + + def config(self, msg, name, value=None, indent=0): + """ Function config + Print a line with the value of a parameter + + @param msg: The message to print in the header (limited to 78 chars) + @param name: The name of the prameter + @param value: The value of the parameter + @param indent: Tab size at the beginning of the line + @return RETURN: None + """ + ind = ' ' * indent * self.TABSIZE + if value is None: + print('{} - {} = {}'.format(ind, msg, name)) + elif value is False: + print('{} [{}KO{}] {} > {} (NOT found)'. + format(ind, self.FAIL, self.ENDC, msg, name)) + else: + print('{} [{}OK{}] {} > {} = {}'. 
+ format(ind, self.OKGREEN, self.ENDC, msg, name, str(value))) + + def list(self, msg, indent=0): + """ Function list + Print a list item + + @param msg: The message to print in the header (limited to 78 chars) + @param indent: Tab size at the beginning of the line + @return RETURN: None + """ + print(' ' * indent * self.TABSIZE, '-', msg) + + def list_id(self, dic, indent=0): + """ Function list_id + Print a list of dict items + + @param dic: The dict to print + @param indent: Tab size at the beginning of the line + @return RETURN: None + """ + for (k, v) in dic.items(): + self.list("{}: {}".format(k, v), indent=indent) + + def status(self, res, msg, failed="", eol="\n", quit=True, indent=0): + """ Function status + Print status message + - OK/KO if the result is a boolean + - Else the result text + + @param res: The status to show + @param msg: The message to show + @param eol: End of line + @param quit: Exit the system in case of failure + @param indent: Tab size at the beginning of the line + @return RETURN: None + """ + ind = ' ' * indent * self.TABSIZE + if res is True: + msg = '{} [{}OK{}] {}'.format(ind, self.OKGREEN, self.ENDC, msg) + elif res: + msg = '{} [{}{}{}] {}'.format(ind, self.OKBLUE, res, + self.ENDC, msg) + else: + msg = '{} [{}KO{}] {}'.format(ind, self.FAIL, self.ENDC, msg) + if failed: + msg += '\n > {}'.format(failed) + msg = msg.ljust(140) + eol + sys.stdout.write(msg) + if res is False and quit is True: + sys.exit(0) + + def ask_validation(self, prompt=None, resp=False): + """ Function ask_validation + Ask a validation message + + @param prompt: The question to ask ('Continue ?') if None + @param resp: The default value (Default is False) + @return RETURN: Trie or False + """ + if prompt is None: + prompt = 'Continue ?' + if resp: + prompt += ' [{}Y{}/n]: '.format(self.BOLD, self.ENDC) + else: + prompt += ' [y/{}N{}]: '.format(self.BOLD, self.ENDC) + while True: + ans = input(prompt) + if not ans: + ans = 'y' if resp else 'n' + if ans not in ['y', 'Y', 'n', 'N']: + print('please enter y or n.') + continue + if ans == 'y' or ans == 'Y': + return True + if ans == 'n' or ans == 'N': + sys.exit(0) diff --git a/opensteak/tools/opensteak/templateparser.py b/opensteak/tools/opensteak/templateparser.py new file mode 100644 index 0000000..720f008 --- /dev/null +++ b/opensteak/tools/opensteak/templateparser.py @@ -0,0 +1,34 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
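The printer's status() contract is what the rest of the tools rely on: True and False render as OK and KO, any other truthy value is printed verbatim (useful for progress text), and quit=True aborts the process on failure. A standalone sketch:

    from opensteak.printer import OpenSteakPrinter

    p = OpenSteakPrinter()
    p.header('Deploy foreman VM')
    p.config('Parameter', 'domain', 'infra.example.com')
    p.status(True, 'DNS record created')
    p.status('42%', 'Provisioning', eol='\r')   # progress-style line
    p.status(False, 'Provisioning', failed='timeout', quit=False)
    # Answering 'n' below exits the process, mirroring quit-style behaviour
    if p.ask_validation('Destroy the VM ?', resp=False):
        print('validated')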
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +""" +Template parser +""" + +from string import Template + +class OpenSteakTemplateParser: + + def __init__(self, filein, fileout, dictionary): + """ + Parse the files with the dictionary + """ + fin = open(filein) + fout = open(fileout,'w') + template = Template(fin.read()) + fout.write(template.substitute(dictionary)) diff --git a/opensteak/tools/opensteak/virsh.py b/opensteak/tools/opensteak/virsh.py new file mode 100644 index 0000000..594b842 --- /dev/null +++ b/opensteak/tools/opensteak/virsh.py @@ -0,0 +1,174 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +""" +Virsh library +""" + +import subprocess +import os + +class OpenSteakVirsh: + + virsh = "/usr/bin/virsh" + genisoimage = "/usr/bin/genisoimage" + environment = "" + + ### + # INIT + ### + def __init__(self): + self.environment = dict(os.environ) # Copy current environment + self.environment['LANG'] = 'en_US.UTF-8' + + + ### + # VOLUMES + ### + def volumeList(self, pool="default"): + """ + Return all volumes from a pool + """ + p = subprocess.Popen([self.virsh, "-q", "vol-list", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + # Split lines + lines = stdout.splitlines() + + # Foreach line, split with space and construct a dictionnary + newLines = {} + for line in lines: + name, path = line.split(maxsplit=1) + newLines[name.strip()] = path.strip() + + return newLines + + def volumeDelete(self, path): + """ + Delete a volume + """ + p = subprocess.Popen([self.virsh, "-q", "vol-delete", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + def volumeClone(self, origin, name, pool="default"): + """ + Clone a volume + """ + p = subprocess.Popen([self.virsh, "-q", "vol-clone", "--pool", pool, origin, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + def volumeResize(self, name, size, pool="default"): + """ + Resize a volume + """ + p = subprocess.Popen([self.virsh, "-q", "vol-resize", "--pool", pool, name, size], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + ### + # POOLS + ### + def poolRefresh(self, pool="default"): + """ + Refresh a pool + """ + p = subprocess.Popen([self.virsh, "-q", "pool-refresh", pool], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + ### + # DOMAINS + ### + def domainList(self): + """ + Return all domains (VM) 
+ """ + p = subprocess.Popen([self.virsh, "-q", "list", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + # Split lines + lines = stdout.splitlines() + + # Foreach line, split with space and construct a dictionnary + newLines = {} + for line in lines: + id, name, status = line.split(maxsplit=2) + newLines[name.strip()] = status.strip() + + return newLines + + def domainDefine(self, xml): + """ + Define a domain (create a VM) + """ + p = subprocess.Popen([self.virsh, "-q", "define", xml], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + def domainUndefine(self, name): + """ + Undefine a domain (delete a VM) + """ + p = subprocess.Popen([self.virsh, "-q", "undefine", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + def domainStart(self, name): + """ + Define a domain (create a VM) + """ + p = subprocess.Popen([self.virsh, "-q", "start", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + def domainDestroy(self, name): + """ + Destroy a domain (stop a VM) + """ + p = subprocess.Popen([self.virsh, "-q", "destroy", name], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + + ### + # ISO + ### + def generateConfiguration(self, name, files): + """ + Generate an ISO file + """ + + commandArray = [self.genisoimage, "-quiet", "-o", "/var/lib/libvirt/images/{0}-configuration.iso".format(name), "-volid", "cidata", "-joliet", "-rock"] + for k, f in files.items(): + commandArray.append(f) + + # Generate the iso file + p = subprocess.Popen(commandArray, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, env=self.environment) + stdout, stderr = p.communicate() + + return {"stdout":stdout, "stderr":stderr} + diff --git a/opensteak/tools/templates_foreman/install.sh b/opensteak/tools/templates_foreman/install.sh new file mode 100644 index 0000000..497be86 --- /dev/null +++ b/opensteak/tools/templates_foreman/install.sh @@ -0,0 +1,216 @@ +#!/bin/sh +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# Authors: +# @author: David Blaisonneau +# @author: Arnaud Morin + +### Set vars +NAME="${name}" +DOMAIN="${domain}" +DATEE=$$(date +%F-%Hh%M) +IP="${ip}" +MASK="${netmaskshort}" +NET="${network}" +DHCP_RANGE="${dhcprange}" +REVERSE_DNS="${reversedns}" +DNS_FORWARDER="${dns}" +ADMIN="${admin}" +PASSWORD="${password}" + +### Set correct env +#dpkg-reconfigure locales +export LC_CTYPE=en_US.UTF-8 +export LANG=en_US.UTF-8 +unset LC_ALL +umask 0022 + +### Check hostname is on the public interface +echo "* Ensure hostname point to external IP" +# Remove useless lines +perl -i -pe 's/^127.0.1.1.*\n$$//' /etc/hosts +perl -i -pe "s/^$${IP}.*\n$$//" /etc/hosts +# Append a line +echo "$${IP} $${NAME}.$${DOMAIN} $${NAME}" >> /etc/hosts + +### Dependencies +echo "* Install dependencies" +apt-get -y install ca-certificates wget git isc-dhcp-server + +### Set AppArmor +echo "* Set App armor" +cat /etc/apparmor.d/local/usr.sbin.dhcpd | grep '/etc/bind/rndc.key r,' >/dev/null +if [ $$? -eq 1 ] ; then + echo "/etc/bind/rndc.key r," >> /etc/apparmor.d/local/usr.sbin.dhcpd +fi + +### Prepare repos +echo "* Enable Puppet labs repo" +if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetlabs-release')" ] ; then + wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb + dpkg -i puppetlabs-release-trusty.deb + apt-get update +fi + +# Install puppetmaster +echo "* Install puppetmaster" +if [ "Z" = "Z$$(dpkg -l |grep 'ii puppetmaster')" ] ; then + apt-get -y install puppetmaster +fi + +# Enable the Foreman repo +echo "* Enable Foreman repo" +if [ ! -e /etc/apt/sources.list.d/foreman.list ] ; then + echo "deb http://deb.theforeman.org/ trusty 1.8" > /etc/apt/sources.list.d/foreman.list + echo "deb http://deb.theforeman.org/ plugins 1.8" >> /etc/apt/sources.list.d/foreman.list + wget -q http://deb.theforeman.org/pubkey.gpg -O- | apt-key add - + apt-get update +fi + +### Install Foreman +echo "* Install foreman-installer" +if [ "Z" = "Z$$(dpkg -l |grep 'ii foreman-installer')" ] ; then + apt-get -y install foreman-installer +fi +if [ "Z" = "Z$$(gem list --local |grep rubyipmi)" ] ; then + gem install -q rubyipmi +fi + +### Execute foreman installer +echo "* Execute foreman installer" + +foreman-installer \ + --foreman-admin-username="$$ADMIN" \ + --foreman-admin-password="$$PASSWORD" \ + --enable-foreman-plugin-templates \ + --enable-foreman-plugin-discovery \ + --foreman-plugin-discovery-install-images=true \ + --enable-foreman-compute-libvirt + + +foreman-installer \ + --foreman-admin-username="$$ADMIN" \ + --foreman-admin-password="$$PASSWORD" \ + --enable-foreman-plugin-templates \ + --enable-foreman-plugin-discovery \ + --foreman-plugin-discovery-install-images=true \ + --enable-foreman-compute-libvirt \ + --enable-foreman-proxy \ + --foreman-proxy-bmc=true \ + --foreman-proxy-tftp=true \ + --foreman-proxy-tftp-servername="$$IP" \ + --foreman-proxy-dhcp=true \ + --foreman-proxy-dhcp-interface="eth0" \ + --foreman-proxy-dhcp-gateway="$$IP" \ + --foreman-proxy-dhcp-range="$$DHCP_RANGE" \ + --foreman-proxy-dhcp-nameservers="$$IP" \ + --foreman-proxy-dns=true \ + --foreman-proxy-dns-interface="eth0" \ + --foreman-proxy-dns-zone="$$DOMAIN" \ + --foreman-proxy-dns-reverse="$$REVERSE_DNS" \ + --foreman-proxy-dns-forwarders="$$DNS_FORWARDER" \ + --foreman-proxy-foreman-base-url="https://localhost" + +### Sync community templates for last ubuntu versions + +echo "* Sync community templates for last ubuntu versions" +foreman-rake templates:sync + +### Get and install OpenSteak files + +echo "* Get OpenSteak repos" +if [ -d 
/usr/local/opensteak ] ; then
+  cd /usr/local/opensteak
+  git pull
+else
+  cd /usr/local/
+  git clone https://github.com/Orange-OpenSource/opnfv.git -b foreman opensteak
+fi
+cd /usr/local/opensteak/infra/puppet_master
+
+echo "* Set puppet auth"
+echo "*.$$DOMAIN" > /etc/puppet/autosign.conf
+if [ -e /etc/puppet/auth.conf ] ; then
+  # Make a backup
+  mv /etc/puppet/auth.conf /etc/puppet/auth.conf.$$DATEE
+fi
+cp etc/puppet/auth.conf /etc/puppet/auth.conf
+perl -i -pe "s/__NET__/$$NET/" /etc/puppet/auth.conf
+perl -i -pe "s/__MASK__/$$MASK/" /etc/puppet/auth.conf
+
+# Set Hiera Conf
+echo "* Push Hiera conf into /etc/puppet/"
+if [ -e /etc/puppet/hiera.yaml ] ; then
+  # Make a backup
+  mv /etc/puppet/hiera.yaml /etc/puppet/hiera.yaml.$$DATEE
+fi
+cp etc/puppet/hiera.yaml /etc/puppet/hiera.yaml
+if [ -e /etc/hiera.yaml ] ; then
+  rm /etc/hiera.yaml
+fi
+ln -s /etc/puppet/hiera.yaml /etc/hiera.yaml
+cp -rf etc/puppet/hieradata /etc/puppet/
+rename s/DOMAIN/$$DOMAIN/ /etc/puppet/hieradata/production/nodes/*.yaml
+cp etc/puppet/manifests/site.pp /etc/puppet/manifests/site.pp
+cp ../config/common.yaml /etc/puppet/hieradata/production/common.yaml
+chgrp puppet /etc/puppet/hieradata/production/*.yaml
+
+# Install and config r10k
+echo "* Install and setup r10k"
+if [ "Z" = "Z$$(gem list --local |grep r10k)" ] ; then
+  gem install -q r10k
+fi
+if [ -e /etc/r10k.yaml ] ; then
+  # Make a backup
+  mv /etc/r10k.yaml /etc/r10k.yaml.$$DATEE
+fi
+cp etc/r10k.yaml /etc/r10k.yaml
+
+# Install opensteak-r10k-update script
+echo "* Install opensteak-r10k-update script into /usr/local/bin"
+cp usr/local/bin/opensteak-r10k-update /usr/local/bin/opensteak-r10k-update
+chmod +x /usr/local/bin/opensteak-r10k-update
+
+echo "* Run R10k. You can re-run r10k by calling:"
+echo "   opensteak-r10k-update"
+opensteak-r10k-update
+
+#### Install VIM puppet
+echo "* Install VIM puppet"
+if [ ! -d ~/.vim/autoload ] ; then
+  mkdir -p ~/.vim/autoload
+fi
+if [ ! -d ~/.vim/bundle ] ; then
+  mkdir -p ~/.vim/bundle
+fi
+curl -LSso ~/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
+cat << EOF > ~/.vimrc
+execute pathogen#infect()
+syntax on
+filetype plugin indent on
+EOF
+cd ~/.vim/bundle
+if [ ! -d vim-puppet ] ; then
+  git clone https://github.com/rodjek/vim-puppet.git > /dev/null
+fi
+
+### Gen SSH key for foreman
+echo "* SSH Key"
+cp /mnt/id_rsa /usr/share/foreman/.ssh/
+cp /mnt/id_rsa.pub /usr/share/foreman/.ssh/
+chown foreman:foreman /usr/share/foreman/.ssh/ -R
+
+### Run puppet
+puppet agent -t -v
+
diff --git a/opensteak/tools/templates_foreman/kvm-config b/opensteak/tools/templates_foreman/kvm-config
new file mode 100644
index 0000000..7e3d65d
--- /dev/null
+++ b/opensteak/tools/templates_foreman/kvm-config
@@ -0,0 +1,65 @@
+<domain type='kvm'>
+  <name>${name}</name>
+  <memory>${ram}</memory>
+  <currentMemory>${ram}</currentMemory>
+  <vcpu>${cpu}</vcpu>
+  <os>
+    <type>hvm</type>
+  </os>
+  <on_poweroff>preserve</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/qemu-system-x86_64</emulator>
+    <!-- remaining disk, interface, console and graphics elements
+         not recoverable -->
+  </devices>
+</domain>
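Both install.sh and kvm-config are string.Template inputs, which is why install.sh writes shell variables as $$VAR (escaped) while ${name}, ${ram} and ${cpu} are substitution slots. A sketch of rendering kvm-config with the OpenSteakTemplateParser defined earlier in this patch; the paths and values are invented, and the real template may require additional keys:

    from opensteak.templateparser import OpenSteakTemplateParser

    OpenSteakTemplateParser(
        'templates_foreman/kvm-config',   # template with ${name}, ${ram}, ${cpu}
        '/tmp/foreman.xml',               # rendered libvirt domain XML
        {'name': 'foreman', 'ram': '4194304', 'cpu': '2'})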