-rwxr-xr-x  build.sh  16
-rw-r--r--  build/arch/RedHat/make_kibana.sh  1
-rw-r--r--  build/build.conf  16
-rwxr-xr-x  build/make_repo.sh  42
-rw-r--r--  build/os/centos/rhel7/compass/Dockerfile.tmpl  25
-rw-r--r--  build/templates/RedHat_juno.tmpl  18
-rw-r--r--  build/templates/RedHat_kilo.tmpl  17
-rw-r--r--  build/templates/RedHat_liberty.tmpl  8
-rw-r--r--  build/templates/compass_core.tmpl  47
-rwxr-xr-x  deploy.sh  11
-rw-r--r--  deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml  31
-rw-r--r--  deploy/adapters/ansible/openstack/templates/nova.conf  9
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml  6
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2  13
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml  6
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2  23
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.j2  1
-rw-r--r--  deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2  2
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh  10
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml  1
-rw-r--r--  deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf  6
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh  3
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh  7
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml  20
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml  16
-rw-r--r--  deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf  3
-rw-r--r--  deploy/adapters/ansible/roles/common/tasks/main.yml  33
-rw-r--r--  deploy/adapters/ansible/roles/common/vars/Debian.yml  13
-rw-r--r--  deploy/adapters/ansible/roles/common/vars/RedHat.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/tasks/main.yml  40
-rwxr-xr-x  deploy/adapters/ansible/roles/dashboard/templates/local_settings_debian  511
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/templates/local_settings_redhat  643
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml  1
-rw-r--r--  deploy/adapters/ansible/roles/dashboard/vars/main.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/main.yml  15
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb.yml  79
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_cluster.yml  3
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml  33
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml  24
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml  49
-rwxr-xr-x  deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml  47
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml (renamed from deploy/adapters/ansible/roles/database/tasks/mongodb.yml)  16
-rw-r--r--  deploy/adapters/ansible/roles/database/tasks/mysql.yml  19
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/mongodb.conf  3
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/my-huge.cnf  151
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/my.cnf  162
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/replica.js  8
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/server.cnf  47
-rw-r--r--  deploy/adapters/ansible/roles/database/templates/wsrep.cnf  36
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/Debian.yml  10
-rw-r--r--  deploy/adapters/ansible/roles/database/vars/RedHat.yml  17
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/glance_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/glance/tasks/nfs.yml  17
-rw-r--r--  deploy/adapters/ansible/roles/glance/vars/Debian.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/glance/vars/RedHat.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/ha/tasks/main.yml  4
-rw-r--r--  deploy/adapters/ansible/roles/ha/templates/haproxy.cfg  42
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/heat_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/heat/tasks/main.yml  1
-rw-r--r--  deploy/adapters/ansible/roles/heat/templates/heat.j2  3
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/keystone/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh  3
-rw-r--r--  deploy/adapters/ansible/roles/keystone/templates/keystone.conf  11
-rw-r--r--  deploy/adapters/ansible/roles/keystone/vars/main.yml  8
-rw-r--r--  deploy/adapters/ansible/roles/memcached/tasks/main.yml  15
-rw-r--r--  deploy/adapters/ansible/roles/memcached/vars/Debian.yml  7
-rw-r--r--  deploy/adapters/ansible/roles/memcached/vars/RedHat.yml  7
-rw-r--r--  deploy/adapters/ansible/roles/memcached/vars/main.yml  6
-rw-r--r--  deploy/adapters/ansible/roles/monitor/tasks/main.yml  1
-rw-r--r--  deploy/adapters/ansible/roles/mq/tasks/main.yml  9
-rw-r--r--  deploy/adapters/ansible/roles/mq/tasks/rabbitmq.yml  45
-rw-r--r--  deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml  30
-rw-r--r--  deploy/adapters/ansible/roles/mq/tasks/rabbitmq_config.yml  15
-rwxr-xr-x  deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml  68
-rw-r--r--  deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml  3
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/neutron-network/tasks/main.yml  21
-rw-r--r--  deploy/adapters/ansible/roles/nova-compute/tasks/main.yml  6
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml  2
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/files/recover_network.py  65
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml  6
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml  95
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml  90
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg  14
-rwxr-xr-x  deploy/adapters/ansible/roles/odl_cluster/vars/main.yml  4
-rw-r--r-- [-rwxr-xr-x]  deploy/adapters/ansible/roles/onos_cluster/files/networking-onos.tar  bin 20480 -> 153600 bytes
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml  35
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml  29
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml  70
-rwxr-xr-x  deploy/adapters/ansible/roles/onos_cluster/vars/main.yml  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml  20
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml  12
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml  8
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml  8
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml  7
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/main.yml  13
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml  26
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml  8
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml  40
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml  35
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml  10
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml  39
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml  3
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml  4
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2  8
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2  4
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2  5
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2  10
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2  6
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2  78
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2  2
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml  22
-rwxr-xr-x  deploy/adapters/ansible/roles/open-contrail/vars/main.yml  28
-rw-r--r--  deploy/adapters/ansible/roles/secgroup/tasks/main.yml  2
-rw-r--r--  deploy/adapters/ansible/roles/setup-network/tasks/main.yml  10
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/files/create_img.sh  4
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/roles/storage/files/get_var_size.sh (renamed from deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh)  2
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/roles/storage/files/loop.yml (renamed from deploy/adapters/ansible/roles/cinder-volume/files/loop.yml)  0
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/files/losetup.sh  7
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/tasks/loop.yml  23
-rwxr-xr-x  deploy/adapters/ansible/roles/storage/tasks/main.yml  20
-rwxr-xr-x [-rw-r--r--]  deploy/adapters/ansible/roles/storage/tasks/real.yml (renamed from deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml)  4
-rw-r--r--  deploy/client.py  65
-rwxr-xr-x [-rw-r--r--]  deploy/compass_vm.sh  20
-rw-r--r--  deploy/conf/base.conf  6
-rw-r--r--  deploy/conf/cluster.conf  25
-rw-r--r--  deploy/conf/virtual.conf  2
-rwxr-xr-x [-rw-r--r--]  deploy/deploy_host.sh  9
-rwxr-xr-x  deploy/deploy_parameter.sh  1
-rwxr-xr-x [-rw-r--r--]  deploy/host_baremetal.sh  9
-rwxr-xr-x [-rw-r--r--]  deploy/host_virtual.sh  19
-rwxr-xr-x  deploy/launch.sh  70
-rwxr-xr-x  deploy/network.sh  46
-rwxr-xr-x  deploy/prepare.sh  15
-rw-r--r--  deploy/restful.py  5
-rw-r--r--  deploy/template/network/bridge_ovs.xml  6
-rwxr-xr-x  deploy_compass.sh  4
-rwxr-xr-x  deploy_host.sh  4
-rwxr-xr-x  redeploy_host.sh  4
-rw-r--r--  util/isolinux.cfg  122
-rw-r--r--  util/ks.cfg  133
161 files changed, 1805 insertions, 2368 deletions
diff --git a/build.sh b/build.sh
index 988ddae7..e24df446 100755
--- a/build.sh
+++ b/build.sh
@@ -32,12 +32,18 @@ function download_git()
file_dir=$CACHE_DIR/${1%.*}
if [[ -d $file_dir/.git ]]; then
cd $file_dir
- git pull origin master
+ source=`git remote -v | head -n 1 | awk '{print $2}'`
+ if [[ $2 == $source ]]; then
+ git pull origin master
+ if [[ $? -eq 0 ]]; then
+ cd -
+ return
+ fi
+ fi
cd -
- else
- rm -rf $CACHE_DIR/$file_dir
- git clone $2 $file_dir
fi
+ rm -rf $CACHE_DIR/${1%.*}
+ git clone $2 $file_dir
}
function download_url()
@@ -92,8 +98,6 @@ function copy_file()
mkdir -p $new/compass $new/bootstrap $new/pip $new/guestimg $new/app_packages $new/ansible
mkdir -p $new/repos/cobbler/{ubuntu,centos}/{iso,ppa}
- cp -rf $SCRIPT_DIR/util/ks.cfg $new/isolinux/ks.cfg
-
rm -rf $new/.rr_moved
if [[ $UBUNTU_ISO ]]; then
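The rewritten download_git above only reuses a cached clone when its recorded remote matches the requested URL, and falls back to a fresh clone otherwise. A minimal standalone sketch of the same idea (cache_repo is a hypothetical helper name; it uses `git remote get-url` rather than parsing `git remote -v` as the script does):

    # Reuse a cached clone only if its origin matches the requested URL; otherwise re-clone.
    cache_repo() {
        local dir=$1 url=$2
        if [[ -d $dir/.git ]]; then
            local source
            source=$(git -C "$dir" remote get-url origin 2>/dev/null)
            if [[ $source == "$url" ]] && git -C "$dir" pull origin master; then
                return 0            # cache hit: updated in place
            fi
        fi
        rm -rf "$dir"               # stale or foreign cache: start over
        git clone "$url" "$dir"
    }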
diff --git a/build/arch/RedHat/make_kibana.sh b/build/arch/RedHat/make_kibana.sh
index 51af5209..95047022 100644
--- a/build/arch/RedHat/make_kibana.sh
+++ b/build/arch/RedHat/make_kibana.sh
@@ -48,5 +48,4 @@ rpmbuild -bb rpmbuild/SPECS/kibana.spec
cp -rf rpmbuild/RPMS/* /var/cache/yum/
-find /var/cache/yum -name "perl*" | xargs rm -rf
popd
diff --git a/build/build.conf b/build/build.conf
index c72451ee..3fb6d02f 100644
--- a/build/build.conf
+++ b/build/build.conf
@@ -2,17 +2,17 @@ TIMEOUT=10
export PACKAGE_URL=${PACKAGE_URL:-http://205.177.226.235:9999}
#begin: package download
-export CENTOS_BASE=${CENTOS_BASE:-$PACKAGE_URL/centos_base.iso}
-export COMPASS_CORE=${COMPASS_CORE:-http://github.com/baigk/compass-core.git}
+export CENTOS_BASE=${CENTOS_BASE:-$PACKAGE_URL/CentOS-7-x86_64-Minimal-1511.iso}
+export COMPASS_CORE=${COMPASS_CORE:-https://github.com/openstack/compass-core.git}
export COMPASS_WEB=${COMPASS_WEB:-http://github.com/baigk/compass-web.git}
export COMPASS_INSTALL=${COMPASS_INSTALL:-http://github.com/baigk/compass-install.git}
export UBUNTU_ISO=${UBUNTU_ISO:-$PACKAGE_URL/ubuntu-14.04.3-server-amd64.iso}
export TRUSTY_JUNO_PPA=${TRUSTY_JUNO_PPA:-$PACKAGE_URL/trusty-juno-ppa.tar.gz}
export TRUSTY_LIBERTY_PPA=${TRUSTY_LIBERTY_PPA:-$PACKAGE_URL/trusty-liberty-ppa.tar.gz}
-export CENTOS_ISO=${CENTOS_ISO:-$PACKAGE_URL/CentOS-7-x86_64-Minimal-1503-01.iso}
+export CENTOS_ISO=${CENTOS_ISO:-$PACKAGE_URL/CentOS-7-x86_64-Minimal-1511.iso}
export CENTOS7_JUNO_PPA=${CENTOS7_JUNO_PPA:-$PACKAGE_URL/centos7-juno-ppa.tar.gz}
#export CENTOS7_KILO_PPA=${CENTOS7_KILO_PPA:-$PACKAGE_URL/centos7-kilo-ppa.tar.gz}
-export COMPASS_PKG=${COMPASS_PKG:-$PACKAGE_URL/centos6-package.tar.gz}
+export COMPASS_PKG=${COMPASS_PKG:-$PACKAGE_URL/centos7-compass-core.tar.gz}
export LOADERS=${LOADERS:-$PACKAGE_URL/loaders.tar.gz}
export CIRROS=${CIRROS:-$PACKAGE_URL/cirros-0.3.3-x86_64-disk.img}
export APP_PACKAGE=${APP_PACKAGE:-$PACKAGE_URL/packages.tar.gz}
@@ -74,5 +74,9 @@ export PIP_CONF="https://bootstrap.pypa.io/ez_setup.py \
https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-0.23.tar.gz \
https://pypi.python.org/packages/source/l/lazypy/lazypy-0.5.tar.gz \
https://pypi.python.org/packages/source/p/pycrypto-on-pypi/pycrypto-on-pypi-2.3.tar.gz \
- https://pypi.python.org/packages/2.6/p/pymongo/pymongo-3.0.3-cp26-none-macosx_10_10_intel.whl \
- https://pypi.python.org/packages/source/c/crudini/crudini-0.7.tar.gz"
+ https://pypi.python.org/packages/source/p/pymongo/pymongo-3.0.3.tar.gz \
+ https://pypi.python.org/packages/source/c/crudini/crudini-0.7.tar.gz \
+ https://pypi.python.org/packages/source/n/networking-odl/networking-odl-1.0.1.tar.gz \
+ https://pypi.python.org/packages/source/p/python-keyczar/python-keyczar-0.715.tar.gz \
+ https://pypi.python.org/packages/source/p/pyasn1/pyasn1-0.1.9.tar.gz \
+ https://pypi.python.org/packages/source/p/pycrypto/pycrypto-2.6.1.tar.gz "
diff --git a/build/make_repo.sh b/build/make_repo.sh
index c4afd4fa..c165c99f 100755
--- a/build/make_repo.sh
+++ b/build/make_repo.sh
@@ -23,24 +23,6 @@ set -ex
cp /*.tar.gz /result -f
EOF
-cat << EOF >${WORK_PATH}/work/repo/elasticsearch.repo
-[elasticsearch-2.x]
-name=Elasticsearch repository for 2.x packages
-baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
-gpgcheck=1
-gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
-enabled=1
-EOF
-
-cat << EOF > ${WORK_PATH}/work/repo/logstash.repo
-[logstash-2.0]
-name=Logstash repository for 2.0.x packages
-baseurl=http://packages.elastic.co/logstash/2.0/centos
-gpgcheck=1
-gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
-enabled=1
-EOF
-
sudo apt-get install python-yaml -y
sudo apt-get install python-cheetah -y
}
@@ -179,9 +161,13 @@ function make_all_repo()
{
make_repo --package-tag pip
- make_repo --os-ver rhel6 --package-tag compass \
+ make_repo --os-ver rhel7 --package-tag compass \
--tmpl "${WORK_PATH}/build/templates/compass_core.tmpl" \
- --default-package "epel-release python-yaml python-jinja2 python-paramiko elasticsearch logstash" \
+ --default-package "kernel-devel epel-release wget libxml2 glibc gcc perl openssl-libs mkisofs createrepo lsof \
+ python-yaml python-jinja2 python-paramiko elasticsearch logstash bind-license vim nmap-ncat \
+ yum cobbler cobbler-web createrepo mkisofs syslinux pykickstart bind rsync fence-agents \
+ dhcp xinetd tftp-server httpd libselinux-python python-setuptools python-devel mysql-devel \
+ mysql-server mysql MySQL-python redis mod_wsgi net-tools rabbitmq-server nfs-utils" \
--special-package "kibana jdk"
make_repo --os-ver trusty --package-tag juno \
@@ -201,11 +187,13 @@ function make_all_repo()
make_repo --os-ver rhel7 --package-tag juno \
--ansible-dir $WORK_PATH/deploy/adapters/ansible \
- --default-package "rsyslog-7.6.7-1.el7 strace net-tools wget vim openssh-server dracut-config-rescue-033-241.el7_1.3 dracut-network-033-241.el7_1.3"
+ --default-package "rsyslog-7.6.7-1.el7 strace net-tools wget vim openssh-server \
+ dracut-config-rescue-033-241.el7_1.3 dracut-network-033-241.el7_1.3"
make_repo --os-ver rhel7 --package-tag kilo \
--ansible-dir $WORK_PATH/deploy/adapters/ansible \
- --default-package "rsyslog-7.6.7-1.el7 strace net-tools wget vim openssh-server dracut-config-rescue-033-241.el7_1.5 dracut-network-033-241.el7_1.5"
+ --default-package "rsyslog-7.6.7-1.el7 strace net-tools wget vim openssh-server \
+ dracut-config-rescue-033-241.el7_1.5 dracut-network-033-241.el7_1.5"
}
function main()
@@ -214,16 +202,6 @@ function main()
if [[ $# -eq 0 ]]; then
make_all_repo
- elif [ "$1" = "rhel7" ]; then
- echo "make rhel7"
- make_repo --os-ver rhel7 --package-tag juno \
- --ansible-dir $WORK_PATH/deploy/adapters/ansible \
- --default-package "rsyslog-7.6.7-1.el7 strace net-tools wget vim openssh-server dracut-config-rescue-033-241.el7_1.3 dracut-network-033-241.el7_1.3"
- elif [ "$1" = "rhel6" ]; then
- make_repo --os-ver rhel6 --package-tag compass \
- --tmpl "${WORK_PATH}/build/templates/compass_core.tmpl" \
- --default-package "epel-release python-yaml python-jinja2 python-paramiko elasticsearch logstash" \
- --special-package "kibana jdk"
else
make_repo $*
fi
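With the rhel6/rhel7 special cases removed, main() now forwards any arguments straight to make_repo, so one-off repos are built by passing the same flags make_all_repo uses. Illustrative invocations (package list shortened for brevity):

    ./build/make_repo.sh                      # no arguments: build every repo via make_all_repo
    ./build/make_repo.sh --os-ver rhel7 --package-tag kilo \
        --ansible-dir $WORK_PATH/deploy/adapters/ansible \
        --default-package "rsyslog-7.6.7-1.el7 strace net-tools"   # forwarded to make_repo as-is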
diff --git a/build/os/centos/rhel7/compass/Dockerfile.tmpl b/build/os/centos/rhel7/compass/Dockerfile.tmpl
new file mode 100644
index 00000000..0d0fbe10
--- /dev/null
+++ b/build/os/centos/rhel7/compass/Dockerfile.tmpl
@@ -0,0 +1,25 @@
+FROM centos:7.2.1511
+MAINTAINER Chigang(Justin) <chigang@huawei.com>
+
+# set cache enable
+RUN sed -i 's/keepcache=0/keepcache=1/g' /etc/yum.conf
+#set packages = $getVar('spcial_packages', [])
+#for package in $packages
+ADD ./RedHat/packages/$package /var/cache/yum/x86_64/7/base/packages/
+#end for
+#set scripts = $getVar('scripts', [])
+#for script in $scripts
+ADD ./RedHat/script/$script /tmp/chigang/$script
+RUN chmod +x /tmp/chigang/$script
+RUN /tmp/chigang/$script
+#end for
+ADD ./install_packages.sh /tmp/chigang/install_packages.sh
+RUN chmod +x /tmp/chigang/install_packages.sh
+RUN /tmp/chigang/install_packages.sh
+
+ADD ./cp_repo.sh /tmp/chigang/cp_repo.sh
+RUN chmod +x /tmp/chigang/cp_repo.sh
+RUN mkdir /result
+
+CMD ["/tmp/chigang/cp_repo.sh"]
+#VOLUME /tmp/chigang
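The new template renders to a Dockerfile that bakes the special packages and scripts into a CentOS 7.2 image, installs everything at build time, and exports the resulting tarballs through cp_repo.sh when the container runs. A sketch of how such an image is typically consumed (the tag and host path are illustrative, not defined in this repo):

    docker build -t compass-rhel7-repo .                          # runs install_packages.sh during the build
    docker run --rm -v "$PWD/result:/result" compass-rhel7-repo   # CMD cp_repo.sh copies /*.tar.gz into /result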
diff --git a/build/templates/RedHat_juno.tmpl b/build/templates/RedHat_juno.tmpl
index e8a54ac8..60a03b9f 100644
--- a/build/templates/RedHat_juno.tmpl
+++ b/build/templates/RedHat_juno.tmpl
@@ -18,6 +18,7 @@ yum install http://rdo.fedorapeople.org/openstack-juno/rdo-release-juno.rpm -y
# modify centos7 repo for workaround
sed -i 's/epel-Derived from Red Hat Enterprise Linux 7.1 (Source)/epel-7/g' /etc/yum.repos.d/rdo-release.repo
+
# add galeracluster repo
cat <<EOF >/etc/yum.repos.d/MariaDB.repo
[mariadb]
@@ -57,19 +58,20 @@ type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
EOF
+cat <<EOF >>/etc/yum.conf
+exclude=mongodb-org,mongodb-org-server
+EOF
+
yum update -y
-yum install createrepo -y
-yum install tar -y
+yum install createrepo tar -y
+yum -y install --downloadonly MariaDB-Galera-server
#download packages
#set packages = $getVar('default_packages', [])
-#for pkg in $packages
-yum -y install $pkg --downloadonly
-#end for
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#set packages = $getVar('packages', [])
-#for pkg in $packages
-yum -y install $pkg --downloadonly
-#end for
+#silent packages.pop(packages.index('MariaDB-Galera-server'))
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#make repo
diff --git a/build/templates/RedHat_kilo.tmpl b/build/templates/RedHat_kilo.tmpl
index 3d587250..826acb8d 100644
--- a/build/templates/RedHat_kilo.tmpl
+++ b/build/templates/RedHat_kilo.tmpl
@@ -64,19 +64,20 @@ baseurl=http://downloads-distro.mongodb.org/repo/redhat/os/x86_64
gpgcheck=0
EOF
+cat <<EOF >>/etc/yum.conf
+exclude=mongodb-org,mongodb-org-server
+EOF
+
yum update -y
-yum install createrepo -y
-yum install tar -y
+yum install createrepo tar -y
+yum -y install --downloadonly MariaDB-Galera-server
#download packages
#set packages = $getVar('default_packages', [])
-#for pkg in $packages
-yum -y install $pkg --downloadonly
-#end for
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#set packages = $getVar('packages', [])
-#for pkg in $packages
-yum -y install $pkg --downloadonly
-#end for
+#silent packages.pop(packages.index('MariaDB-Galera-server'))
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#make repo
diff --git a/build/templates/RedHat_liberty.tmpl b/build/templates/RedHat_liberty.tmpl
index 8473f070..29469437 100644
--- a/build/templates/RedHat_liberty.tmpl
+++ b/build/templates/RedHat_liberty.tmpl
@@ -53,16 +53,20 @@ type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
EOF
+cat <<EOF >>/etc/yum.conf
+exclude=mongodb-org,mongodb-org-server
+EOF
+
yum update -y
yum install createrepo tar -y
yum -y install --downloadonly MariaDB-Galera-server
#download packages
#set packages = $getVar('default_packages', [])
-yum -y install --downloadonly #echo ' '.join(packages)
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#set packages = $getVar('packages', [])
#silent packages.pop(packages.index('MariaDB-Galera-server'))
-yum -y install --downloadonly #echo ' '.join(packages)
+yum -y install --skip-broken --downloadonly #echo ' '.join(packages)
#make repo
mkdir -p /centos7-liberty-ppa/{Packages,repodata}
diff --git a/build/templates/compass_core.tmpl b/build/templates/compass_core.tmpl
index afecba58..1cb34b49 100644
--- a/build/templates/compass_core.tmpl
+++ b/build/templates/compass_core.tmpl
@@ -1,23 +1,42 @@
#!/bin/bash
set -x
-yum install tar -y
+
+# add openstack kilo repo
+yum install http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm -y
+yum install http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm -y
+
+cat << EOF >/etc/yum.repos.d/elasticsearch.repo
+[elasticsearch-2.x]
+name=Elasticsearch repository for 2.x packages
+baseurl=http://packages.elastic.co/elasticsearch/2.x/centos
+gpgcheck=1
+gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+EOF
+
+cat << EOF >/etc/yum.repos.d/logstash.repo
+[logstash-2.0]
+name=Logstash repository for 2.0.x packages
+baseurl=http://packages.elastic.co/logstash/2.0/centos
+gpgcheck=1
+gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+EOF
+
+yum update -y
+
+mkdir -p /centos7-compass-core
+yum -y reinstall bind-license --downloadonly --downloaddir=/centos7-compass-core
+yum -y reinstall openssl-libs --downloadonly --downloaddir=/centos7-compass-core
#download packages
#set packages = $getVar('default_packages', [])
-#for pkg in $packages
-yum -y install $pkg
-#end for
+yum -y install --skip-broken --downloadonly --downloaddir=/centos7-compass-core #echo ' '.join(packages)
#set packages = $getVar('packages', [])
-#for pkg in $packages
-yum -y install $pkg
-#end for
-
+yum -y install --skip-broken --downloadonly --downloaddir=/centos7-compass-core #echo ' '.join(packages)
#make repo
-mkdir -p /centos6-package
-
-find /var/cache/yum/ -name "*.rpm" | xargs -i cp {} /centos6-package
+find /var/cache/yum/ -name "*.rpm" | xargs -i cp {} /centos7-compass-core
-find /centos6-package -size 0 -name "*.rpm" | xargs rm -f
-
-tar -zcvf /centos6-package.tar.gz /centos6-package
+yum install tar -y
+tar -zcvf /centos7-compass-core.tar.gz /centos7-compass-core
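This template and the RedHat_* templates above now share one pattern: `yum --downloadonly --downloaddir=...` fetches RPMs and their dependencies without installing them, and the collected directory is tarred up for later use as a repo. A condensed sketch of the pattern, with example package names and `createrepo` shown as the usual indexing step (the template itself leaves repo metadata to later tooling):

    mkdir -p /centos7-compass-core
    yum -y install --skip-broken --downloadonly \
        --downloaddir=/centos7-compass-core python-yaml python-jinja2   # download only, never install
    createrepo /centos7-compass-core        # build repodata so yum can consume the directory
    tar -zcvf /centos7-compass-core.tar.gz /centos7-compass-core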
diff --git a/deploy.sh b/deploy.sh
index 4cfe89e8..5b315c5c 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -1,15 +1,18 @@
+#!/bin/bash
#set -x
COMPASS_DIR=`cd ${BASH_SOURCE[0]%/*}/;pwd`
export COMPASS_DIR
-for i in python-cheetah python-yaml screen; do
+if [[ -z $DEPLOY_COMPASS && -z $DEPLOY_HOST && -z $REDEPLOY_HOST ]]; then
+ export DEPLOY_COMPASS="true"
+ export DEPLOY_HOST="true"
+fi
+
+for i in python-cheetah python-yaml; do
if [[ `dpkg-query -l $i` == 0 ]]; then
continue
fi
sudo apt-get install -y --force-yes $i
done
-screen -ls |grep deploy|awk -F. '{print $1}'|xargs kill -9
-screen -wipe
-#screen -dmSL deploy bash $COMPASS_DIR/ci/launch.sh $*
$COMPASS_DIR/deploy/launch.sh $*
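When none of the stage variables are set, deploy.sh now defaults to running both stages; the wrapper scripts listed in the diffstat (deploy_compass.sh, deploy_host.sh, redeploy_host.sh) presumably export one flag each. Illustrative direct invocations, assuming the flags gate the matching stages in launch.sh:

    ./deploy.sh                          # neither flag set: DEPLOY_COMPASS and DEPLOY_HOST default to true
    DEPLOY_COMPASS=true ./deploy.sh      # only the Compass stage
    REDEPLOY_HOST=true ./deploy.sh       # only host redeployment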
diff --git a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
index e66354e1..199c14ab 100644
--- a/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
+++ b/deploy/adapters/ansible/openstack/HA-ansible-multinodes.yml
@@ -1,21 +1,26 @@
---
- hosts: all
remote_user: root
- sudo: True
roles:
- common
+
+- hosts: all
+ remote_user: root
+ accelerate: true
+ roles:
- setup-network
- hosts: ha
remote_user: root
- sudo: True
+ accelerate: true
roles:
- ha
- hosts: controller
remote_user: root
- sudo: True
+ accelerate: true
roles:
+ - memcached
- database
- mq
- keystone
@@ -30,9 +35,15 @@
- dashboard
- heat
+- hosts: all
+ remote_user: root
+ accelerate: true
+ roles:
+ - storage
+
- hosts: compute
remote_user: root
- sudo: True
+ accelerate: true
roles:
- nova-compute
- neutron-compute
@@ -41,13 +52,13 @@
- hosts: all
remote_user: root
- sudo: True
+ accelerate: true
roles:
- odl_cluster
- hosts: all
remote_user: root
- sudo: True
+ accelerate: true
roles:
- onos_cluster
@@ -59,24 +70,24 @@
- hosts: controller
remote_user: root
- sudo: True
+ accelerate: true
roles:
- ext-network
- hosts: ceph_adm
remote_user: root
- sudo: True
+ accelerate: true
roles:
- ceph-deploy
- hosts: all
remote_user: root
- sudo: True
+ accelerate: true
roles:
- monitor
- hosts: all
remote_user: root
- sudo: True
+ accelerate: true
roles:
- secgroup
diff --git a/deploy/adapters/ansible/openstack/templates/nova.conf b/deploy/adapters/ansible/openstack/templates/nova.conf
index 2364132e..cec11440 100644
--- a/deploy/adapters/ansible/openstack/templates/nova.conf
+++ b/deploy/adapters/ansible/openstack/templates/nova.conf
@@ -1,3 +1,9 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
+
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
@@ -49,7 +55,7 @@ notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
-memcached_servers = {{ internal_vip.ip }}:11211
+memcached_servers = {{ memcached_servers }}
[database]
# The SQLAlchemy connection string used to connect to the database
@@ -64,6 +70,7 @@ identity_uri = http://{{ internal_vip.ip }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
+memcached_servers = {{ memcached_servers }}
[glance]
host = {{ internal_vip.ip }}
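The Jinja2 preamble added at the top of this template builds one comma-separated memcached endpoint list from every haproxy host, replacing the single-VIP entry. For example, with haproxy_hosts containing 10.1.0.50 and 10.1.0.51, both occurrences render as:

    memcached_servers = 10.1.0.50:11211,10.1.0.51:11211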
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
index cb78acb6..a6ee0000 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/tasks/main.yml
@@ -15,3 +15,9 @@
- ceilometer_configs_templates
- dest
notify: restart ceilometer relation service
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: ceilometer_services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2 b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
index 716317da..ee78de01 100644
--- a/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_compute/templates/ceilometer.j2
@@ -1,6 +1,9 @@
[DEFAULT]
verbose = True
rpc_backend = rabbit
+auth_strategy = keystone
+
+[oslo_messaging_rabbit]
rabbit_host = {{ internal_vip.ip }}
rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
@@ -8,6 +11,16 @@ rabbit_password = {{ RABBIT_PASS }}
[publisher]
metering_secret = {{ metering_secret }}
+[keystone_authtoken]
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = ceilometer
+password = {{ CEILOMETER_PASS }}
+
[service_credentials]
os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
os_username = ceilometer
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
index cb78acb6..a6ee0000 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/tasks/main.yml
@@ -15,3 +15,9 @@
- ceilometer_configs_templates
- dest
notify: restart ceilometer relation service
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: ceilometer_services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
index c2993885..b262a26b 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/ceilometer.j2
@@ -1,26 +1,37 @@
[DEFAULT]
rpc_backend = rabbit
+auth_strategy = keystone
+verbose = True
+
+[oslo_messaging_rabbit]
rabbit_host = {{ internal_vip.ip }}
rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
-auth_strategy = keystone
-verbose = True
[database]
connection = mongodb://ceilometer:{{ CEILOMETER_DBPASS }}@{{ internal_vip.ip }}:27017/ceilometer
[keystone_authtoken]
-auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
+auth_uri = http://{{ internal_vip.ip }}:5000
+auth_url = http://{{ internal_vip.ip }}:35357
identity_uri = http://{{ internal_vip.ip }}:35357
-admin_tenant_name = service
-admin_user = ceilometer
-admin_password = {{ CEILOMETER_PASS }}
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+project_name = service
+username = ceilometer
+password = {{ CEILOMETER_PASS }}
[service_credentials]
os_auth_url = http://{{ internal_vip.ip }}:5000/v2.0
os_username = ceilometer
os_tenant_name = service
os_password = {{ CEILOMETER_PASS }}
+os_endpoint_type = internalURL
+os_region_name = regionOne
[publisher]
metering_secret = {{ metering_secret }}
+
+[api]
+host = {{ internal_ip }}
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.j2
index 390bd870..dfd04739 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/cinder.j2
@@ -1,3 +1,2 @@
[DEFAULT]
-control_exchange = cinder
notification_driver = messagingv2
diff --git a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2 b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
index 616e7e05..a513d2c1 100644
--- a/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
+++ b/deploy/adapters/ansible/roles/ceilometer_controller/templates/glance.j2
@@ -1,6 +1,8 @@
[DEFAULT]
notification_driver = messagingv2
rpc_backend = rabbit
+
+[oslo_messaging_rabbit]
rabbit_host = {{ internal_vip.ip }}
rabbit_userid = {{ RABBIT_USER }}
rabbit_password = {{ RABBIT_PASS }}
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
index d0c631fb..2c9e57f1 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
+++ b/deploy/adapters/ansible/roles/ceph-deploy/files/create_osd.sh
@@ -12,20 +12,20 @@ ps -ef |grep vgremove |awk '{print $2}' |xargs kill -9
ps -ef |grep vgcreate |awk '{print $2}' |xargs kill -9
ps -ef |grep lvcreate |awk '{print $2}' |xargs kill -9
-if [ -L "/dev/cinder-volumes/ceph0" ]; then
+if [ -L "/dev/storage-volumes/ceph0" ]; then
echo "remove lv vg"
-lvremove -f /dev/cinder-volumes/ceph0
+lvremove -f /dev/storage-volumes/ceph0
fi
echo "lvcreate"
-lvcreate -l 100%FREE -nceph0 cinder-volumes
+lvcreate -l 100%FREE -nceph0 storage-volumes
echo "mkfs"
-mkfs.xfs -f /dev/cinder-volumes/ceph0
+mkfs.xfs -f /dev/storage-volumes/ceph0
if [ ! -d "/var/local/osd" ]; then
echo "mount osd"
mkdir -p /var/local/osd
-mount /dev/cinder-volumes/ceph0 /var/local/osd
+mount /dev/storage-volumes/ceph0 /var/local/osd
fi
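The script now carves its ceph0 logical volume out of the shared storage-volumes group (set up by the new storage role) instead of cinder-volumes. A quick preflight check, illustrative rather than part of the script:

    vgs storage-volumes >/dev/null || echo "storage-volumes VG missing: run the storage role first"
    lvs /dev/storage-volumes/ceph0 2>/dev/null && echo "ceph0 exists; script will remove and recreate it"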
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
index 49a34dab..5c047566 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/ceph_install_Debian.yml
@@ -9,6 +9,7 @@
- name: install ceph for every nodes includes jumpserver
shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy install {{ ceph_cluster_hosts.stdout_lines[0]}}
+ ignore_errors: true
- name: create monitor node in controller group
shell: cd {{ ceph_cluster_dir[0] }} && ceph-deploy --overwrite-conf mon create-initial
diff --git a/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml b/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
index 75ed1da2..1b04f3c6 100644
--- a/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ceph-deploy/tasks/main.yml
@@ -28,3 +28,5 @@
- ceph_deploy
- ceph_openstack_conf
- ceph_openstack
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
index b06aac50..c01ad4f6 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_config.yml
@@ -5,5 +5,7 @@
- name: sync cinder db
#cinder_manage: action=dbsync
shell: su -s /bin/sh -c 'cinder-manage db sync' cinder
+ ignore_errors: true
+ changed_when: true
notify:
- restart cinder control serveice
diff --git a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml
index 686ce1f5..66bb5f06 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/tasks/cinder_install.yml
@@ -4,7 +4,7 @@
with_items: packages | union(packages_noarch)
- name: generate common cinder service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: upload cinder conf
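Replacing `shell: echo {{ item }} >> /opt/service` with lineinfile makes the task idempotent: re-running the play no longer appends duplicate entries. The shell equivalent of that guarantee, as a sketch (append_once is a hypothetical helper):

    append_once() {
        local line=$1 file=$2
        # -q quiet, -x match whole line, -F fixed string: append only when absent
        grep -qxF "$line" "$file" 2>/dev/null || echo "$line" >> "$file"
    }
    append_once cinder-api /opt/service   # calling this twice still leaves a single entry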
diff --git a/deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml
index 5491dcf0..1df53e24 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-controller/tasks/main.yml
@@ -13,3 +13,5 @@
- config
- cinder-config
- cinder
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
index b61e6562..8ecda649 100644
--- a/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-controller/templates/cinder.conf
@@ -3,7 +3,7 @@ rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
-volume_group = cinder-volumes
+volume_group = storage-volumes
verbose = {{ VERBOSE }}
debug = {{ DEBUG }}
auth_strategy = keystone
@@ -39,7 +39,6 @@ volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
max_gigabytes=10000
-volume_group=cinder-volumes
volume_clear=zero
volume_clear_size=10
@@ -62,3 +61,6 @@ admin_password = {{ CINDER_PASS }}
[database]
connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder
idle_timeout = 30
+
+[keymgr]
+encryption_auth_url=http://{{ internal_vip.ip }}:5000/v3
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh b/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
deleted file mode 100644
index 2e734188..00000000
--- a/deploy/adapters/ansible/roles/cinder-volume/files/create_img.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-if [[ ! -f /var/cinder.img ]]; then
- dd if=/dev/zero of=/var/cinder.img bs=1 count=0 seek=$1
-fi
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh b/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
deleted file mode 100644
index d0e6c776..00000000
--- a/deploy/adapters/ansible/roles/cinder-volume/files/losetup.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-loop_dev=`losetup -a |grep "/var/cinder.img"|awk -F':' '{print $1}'`
-if [[ -z $loop_dev ]]; then
- losetup -f --show /var/cinder.img
-else
- echo $loop_dev
-fi
-
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
deleted file mode 100644
index b44253c4..00000000
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/loop.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- include_vars: "{{ ansible_os_family }}.yml"
-
-- name: get available /var partition size
- script: get_var_size.sh
- register: part_size
-
-- name: create cinder file if not exitst
- script: create_img.sh {{ part_size.stdout }}
-
-- name: do a losetup on /mnt/cinder-volumes
- script: losetup.sh
- register: loop_device
-
-- name: debug loop device
- debug: msg={{ loop_device.stdout }}
-
-- name: create physical and group volumes
- lvg: vg=cinder-volumes pvs={{ loop_device.stdout }}
- vg_options=--force
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
index 6b193d6e..c0da9984 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/cinder-volume/tasks/main.yml
@@ -6,23 +6,13 @@
with_items: packages | union(packages_noarch)
- name: generate cinder volume service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
-- name: check if physical device exists
- stat: path={{ physical_device }}
- register: status
-
-- name: load loop.yml
- include: loop.yml
- when: status.stat.exists == False or status.stat.isblk == False
-
-- name: load real.yml
- include: real.yml
- when: status.stat.exists == True and status.stat.isblk == True
-
- name: upload cinder-volume configuration
template: src=cinder.conf dest=/etc/cinder/cinder.conf
backup=yes
notify:
- restart cinder-volume services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
index fdcac69d..4d8e49ce 100644
--- a/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
+++ b/deploy/adapters/ansible/roles/cinder-volume/templates/cinder.conf
@@ -3,7 +3,7 @@ rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
-volume_group = cinder-volumes
+volume_group = storage-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
@@ -38,7 +38,6 @@ volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
max_gigabytes=10000
-volume_group=cinder-volumes
volume_clear=zero
volume_clear_size=10
diff --git a/deploy/adapters/ansible/roles/common/tasks/main.yml b/deploy/adapters/ansible/roles/common/tasks/main.yml
index 3097d092..b9c52c3e 100644
--- a/deploy/adapters/ansible/roles/common/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/common/tasks/main.yml
@@ -21,11 +21,8 @@
echo "# compass" >> /etc/hosts
echo {{ COMPASS_SERVER.stdout_lines[0] }} {{ name.stdout_lines[0] }} >> /etc/hosts
-- name: first update pkgs
- shell: "{{ ansible_pkg_mgr }} update"
-
- name: install packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest"
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
with_items: packages | union(packages_noarch)
- name: make config template dir exist
@@ -41,12 +38,14 @@
pip: name={{ item }} state=present extra_args='--pre'
with_items: pip_packages
+- name: install keyczar for accelerate
+ pip: name=python-keyczar state=present extra_args='--pre'
+ delegate_to: 127.0.0.1
+ run_once: true
+
- name: update ntp conf
template: src=ntp.conf dest=/etc/ntp.conf backup=yes
-- name: stop ntp
- service: name={{ ntp_service }} state=stopped enabled=yes
-
- name: use ntpdate once for initial sync time
shell: ntpdate {{ ntp_server }}
ignore_errors: True
@@ -55,9 +54,21 @@
shell: hwclock --systohc
ignore_errors: True
-- name: restart ntp
- service: name={{ ntp_service }} state=restarted enabled=yes
+- name: create fireball keys dir
+ file: path=~/.fireball.keys state=directory mode=0700
+ delegate_to: 127.0.0.1
+ run_once: true
-- name: restart openvswitch
+- name: restart services
service: name={{ item }} state=restarted enabled=yes
- with_items: services
+ with_items: services| union(services_noarch)
+
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services| union(services_noarch)
+
+- name: kill daemon for accelerate
+ shell: lsof -ni :5099|grep LISTEN|awk '{print $2}'|xargs kill -9
+ ignore_errors: true
+
+- meta: flush_handlers
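The new kill task clears any stale accelerate daemon; Ansible's accelerate mode listens on port 5099 by default, which is what the lsof filter targets. An equivalent one-off from a shell (sketch; adding `xargs -r` avoids invoking kill when nothing is listening):

    lsof -ni :5099 | grep LISTEN                                        # any accelerate daemon running?
    lsof -ni :5099 | grep LISTEN | awk '{print $2}' | xargs -r kill -9  # kill it if so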
diff --git a/deploy/adapters/ansible/roles/common/vars/Debian.yml b/deploy/adapters/ansible/roles/common/vars/Debian.yml
index 8dce2729..304517a6 100644
--- a/deploy/adapters/ansible/roles/common/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/common/vars/Debian.yml
@@ -4,12 +4,17 @@ packages:
#- python-dev
- openvswitch-datapath-dkms
- openvswitch-switch
- - crudini
+ - python-memcache
+ - python-iniparse
+ - python-d* #TODO, need remove
-pip_packages: []
+pip_packages:
+ - crudini
+ - python-keyczar
pip_conf: pip.conf
-services: []
+services:
+ - ntp
+
-ntp_service: ntp
diff --git a/deploy/adapters/ansible/roles/common/vars/RedHat.yml b/deploy/adapters/ansible/roles/common/vars/RedHat.yml
index 6618748f..e1f179ad 100644
--- a/deploy/adapters/ansible/roles/common/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/common/vars/RedHat.yml
@@ -1,6 +1,7 @@
---
packages:
- openvswitch
+ - python-memcached
pip_packages:
- crudini
@@ -9,5 +10,4 @@ pip_conf: pip.conf
services:
- openvswitch
-
-ntp_service: ntpd
+ - ntpd
diff --git a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
index c9eccfb4..86b4f93d 100644
--- a/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/tasks/main.yml
@@ -11,6 +11,9 @@
- name: restart http services
service: name={{ http_service }} state=restarted enabled=yes
+- name: write services to monitor list
+ lineinfile: dest=/opt/service create=yes line='{{ http_service }}'
+
- name: install dashboard packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages | union(packages_noarch)
@@ -19,15 +22,40 @@
action: "{{ ansible_pkg_mgr }} name=openstack-dashboard-ubuntu-theme state=absent"
- name: update ubuntu horizon settings
- template: src=local_settings_debian
- dest=/etc/openstack-dashboard/local_settings.py
- backup=yes
+ lineinfile:
+ dest: /etc/openstack-dashboard/local_settings.py
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line }}'
+ with_items:
+ - regexp: '^WEBROOT[ \t]*=.*'
+ line: 'WEBROOT = "/horizon"'
+ - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
+ line: 'COMPRESS_OFFLINE=True'
+ - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
+ line: 'ALLOWED_HOSTS = ["*"]'
+ - regexp: '^OPENSTACK_HOST[ \t]*=.*'
+ line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
+ when: ansible_os_family == 'Debian'
+
+- name: precompile horizon css
+ shell: /usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
+ ignore_errors: True
when: ansible_os_family == 'Debian'
- name: update redhat version horizon settings
- template: src=local_settings_redhat
- dest=/etc/openstack-dashboard/local_settings
- backup=yes
+ lineinfile:
+ dest: /etc/openstack-dashboard/local_settings
+ regexp: '{{ item.regexp }}'
+ line: '{{ item.line }}'
+ with_items:
+ - regexp: '^WEBROOT[ \t]*=.*'
+ line: 'WEBROOT = "/horizon"'
+ - regexp: '^COMPRESS_OFFLINE[ \t]*=.*'
+ line: 'COMPRESS_OFFLINE=False'
+ - regexp: '^ALLOWED_HOSTS[ \t]*=.*'
+ line: 'ALLOWED_HOSTS = ["*"]'
+ - regexp: '^OPENSTACK_HOST[ \t]*=.*'
+ line: 'OPENSTACK_HOST = "{{ internal_ip }}"'
when: ansible_os_family == 'RedHat'
- name: restart dashboard services
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/local_settings_debian b/deploy/adapters/ansible/roles/dashboard/templates/local_settings_debian
deleted file mode 100755
index ff709549..00000000
--- a/deploy/adapters/ansible/roles/dashboard/templates/local_settings_debian
+++ /dev/null
@@ -1,511 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-
-DEBUG = True
-TEMPLATE_DEBUG = DEBUG
-
-# Required for Django 1.5.
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-#ALLOWED_HOSTS = ['horizon.example.com', ]
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, The identity service APIs have inconsistent
-# use of the decimal point, so valid options would be "2.0" or "3".
-# OPENSTACK_API_VERSIONS = {
-# "identity": 3,
-# "volume": 2
-# }
-
-# Set this to True if running on multi-domain model. When this is enabled, it
-# will require user to enter the Domain name in addition to username for login.
-# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
-
-# Set Console type:
-# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
-# CONSOLE_TYPE = "AUTO"
-
-# Default OpenStack Dashboard configuration.
-HORIZON_CONFIG = {
- 'dashboards': ('project', 'admin', 'settings',),
- 'default_dashboard': 'project',
- 'user_home': 'openstack_dashboard.views.get_user_home',
- 'ajax_queue_limit': 10,
- 'auto_fade_alerts': {
- 'delay': 3000,
- 'fade_duration': 1500,
- 'types': ['alert-success', 'alert-info']
- },
- 'help_url': "http://docs.openstack.org",
- 'exceptions': {'recoverable': exceptions.RECOVERABLE,
- 'not_found': exceptions.NOT_FOUND,
- 'unauthorized': exceptions.UNAUTHORIZED},
-}
-
-# Specify a regular expression to validate user passwords.
-# HORIZON_CONFIG["password_validator"] = {
-# "regex": '.*',
-# "help_text": _("Your password does not meet the requirements.")
-# }
-
-# Disable simplified floating IP address management for deployments with
-# multiple floating IP pools or complex network requirements.
-# HORIZON_CONFIG["simple_ip_management"] = False
-
-# Turn off browser autocompletion for the login form if so desired.
-# HORIZON_CONFIG["password_autocomplete"] = "off"
-
-LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizion generate a
-# default secret key that is unique on this machine, e.i. regardless of the
-# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there
-# may be situations where you would want to set this explicitly, e.g. when
-# multiple dashboard instances are distributed on different machines (usually
-# behind a load-balancer). Either you have to make sure that a session gets all
-# requests routed to the same dashboard instance or you set the same SECRET_KEY
-# for all of them.
-from horizon.utils import secret_key
-SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to login again. To use
-# memcached set CACHES to something like
-CACHES = {
- 'default': {
- 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION' : '127.0.0.1:11211',
- }
-}
-
-#CACHES = {
-# 'default': {
-# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache'
-# }
-#}
-
-# Enable the Ubuntu theme if it is present.
-try:
- from ubuntu_theme import *
-except ImportError:
- pass
-
-# Default Ubuntu apache configuration uses /horizon as the application root.
-# Configure auth redirects here accordingly.
-LOGIN_URL='/horizon/auth/login/'
-LOGOUT_URL='/horizon/auth/logout/'
-LOGIN_REDIRECT_URL='/horizon'
-
-# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
-# offline compression by default. To enable online compression, install
-# the node-less package and enable the following option.
-COMPRESS_OFFLINE = True
-
-# By default, validation of the HTTP Host header is disabled. Production
-# installations should have this set accordingly. For more information
-# see https://docs.djangoproject.com/en/dev/ref/settings/.
-ALLOWED_HOSTS = ['{{ dashboard_host }}']
-
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# Configure these for your outgoing email host
-# EMAIL_HOST = 'smtp.my-company.com'
-# EMAIL_PORT = 25
-# EMAIL_HOST_USER = 'djangomail'
-# EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-# AVAILABLE_REGIONS = [
-# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-# ]
-
-OPENSTACK_HOST = "{{ internal_vip.ip }}"
-OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-# OPENSTACK_SSL_NO_VERIFY = True
-
-# The CA certificate to use to verify SSL connections
-# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
- 'name': 'native',
- 'can_edit_user': True,
- 'can_edit_group': True,
- 'can_edit_project': True,
- 'can_edit_domain': True,
- 'can_edit_role': True
-}
-
-#Setting this to True, will add a new "Retrieve Password" action on instance,
-#allowing Admin session password retrieval/decryption.
-#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
-
-# The Xen Hypervisor has the ability to set the mount point for volumes
-# attached to instances (other Hypervisors currently do not). Setting
-# can_set_mount_point to True will add the option to set the mount point
-# from the UI.
-OPENSTACK_HYPERVISOR_FEATURES = {
- 'can_set_mount_point': False,
- 'can_set_password': False,
-}
-
-# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
-# services provided by neutron. Options currently available are load
-# balancer service, security groups, quotas, VPN service.
-OPENSTACK_NEUTRON_NETWORK = {
- 'enable_lb': False,
- 'enable_firewall': False,
- 'enable_quotas': True,
- 'enable_vpn': False,
- # The profile_support option is used to detect if an external router can be
- # configured via the dashboard. When using specific plugins the
- # profile_support can be turned on if needed.
- 'profile_support': None,
- #'profile_support': 'cisco',
-}
-
-# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
-# in the OpenStack Dashboard related to the Image service, such as the list
-# of supported image formats.
-# OPENSTACK_IMAGE_BACKEND = {
-# 'image_formats': [
-# ('', ''),
-# ('aki', _('AKI - Amazon Kernel Image')),
-# ('ami', _('AMI - Amazon Machine Image')),
-# ('ari', _('ARI - Amazon Ramdisk Image')),
-# ('iso', _('ISO - Optical Disk Image')),
-# ('qcow2', _('QCOW2 - QEMU Emulator')),
-# ('raw', _('Raw')),
-# ('vdi', _('VDI')),
-# ('vhd', _('VHD')),
-# ('vmdk', _('VMDK'))
-# ]
-# }
-
-# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
-# image custom property attributes that appear on image detail pages.
-IMAGE_CUSTOM_PROPERTY_TITLES = {
- "architecture": _("Architecture"),
- "kernel_id": _("Kernel ID"),
- "ramdisk_id": _("Ramdisk ID"),
- "image_state": _("Euca2ools state"),
- "project_id": _("Project ID"),
- "image_type": _("Image Type")
-}
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'publicURL'.
-#OPENSTACK_ENDPOINT_TYPE = "publicURL"
-
-# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
-# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is None. This
-# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
-#SECONDARY_ENDPOINT_TYPE = "publicURL"
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-# When launching an instance, the menu of available flavors is
-# sorted by RAM usage, ascending. If you would like a different sort order,
-# you can provide another flavor attribute as sorting key. Alternatively, you
-# can provide a custom callback method to use for sorting. You can also provide
-# a flag for reverse sort. For more info, see
-# http://docs.python.org/2/library/functions.html#sorted
-# CREATE_INSTANCE_FLAVOR_SORT = {
-# 'key': 'name',
-# # or
-# 'key': my_awesome_callback_method,
-# 'reverse': False,
-# }
-
-# The Horizon Policy Enforcement engine uses these values to load per service
-# policy rule files. The content of these files should match the files the
-# OpenStack services are using to determine role based access control in the
-# target installation.
-
-# Path to directory containing policy.json files
-#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
-# Map of local copy of service policy files
-#POLICY_FILES = {
-# 'identity': 'keystone_policy.json',
-# 'compute': 'nova_policy.json',
-# 'volume': 'cinder_policy.json',
-# 'image': 'glance_policy.json',
-#}
-
-# Trove user and database extension support. By default support for
-# creating users and databases on database instances is turned on.
-# To disable these extensions set the permission here to something
-# unusable such as ["!"].
-# TROVE_ADD_USER_PERMS = []
-# TROVE_ADD_DATABASE_PERMS = []
-
-LOGGING = {
- 'version': 1,
- # When set to True this will disable all logging except
- # for loggers specified in this configuration dictionary. Note that
- # if nothing is specified here and disable_existing_loggers is True,
- # django.db.backends will still log unless it is disabled explicitly.
- 'disable_existing_loggers': False,
- 'handlers': {
- 'null': {
- 'level': 'DEBUG',
- 'class': 'django.utils.log.NullHandler',
- },
- 'console': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- },
- },
- 'loggers': {
- # Logging from django.db.backends is VERY verbose, send to null
- # by default.
- 'django.db.backends': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'requests': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'horizon': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_dashboard': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'novaclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'cinderclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'keystoneclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'glanceclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'neutronclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'heatclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'ceilometerclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'troveclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'swiftclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_auth': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'nose.plugins.manager': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'django': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'iso8601': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- }
-}
-
-# 'direction' should not be specified for all_tcp/udp/icmp.
-# It is specified in the form.
-SECURITY_GROUP_RULES = {
- 'all_tcp': {
- 'name': 'ALL TCP',
- 'ip_protocol': 'tcp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_udp': {
- 'name': 'ALL UDP',
- 'ip_protocol': 'udp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_icmp': {
- 'name': 'ALL ICMP',
- 'ip_protocol': 'icmp',
- 'from_port': '-1',
- 'to_port': '-1',
- },
- 'ssh': {
- 'name': 'SSH',
- 'ip_protocol': 'tcp',
- 'from_port': '22',
- 'to_port': '22',
- },
- 'smtp': {
- 'name': 'SMTP',
- 'ip_protocol': 'tcp',
- 'from_port': '25',
- 'to_port': '25',
- },
- 'dns': {
- 'name': 'DNS',
- 'ip_protocol': 'tcp',
- 'from_port': '53',
- 'to_port': '53',
- },
- 'http': {
- 'name': 'HTTP',
- 'ip_protocol': 'tcp',
- 'from_port': '80',
- 'to_port': '80',
- },
- 'pop3': {
- 'name': 'POP3',
- 'ip_protocol': 'tcp',
- 'from_port': '110',
- 'to_port': '110',
- },
- 'imap': {
- 'name': 'IMAP',
- 'ip_protocol': 'tcp',
- 'from_port': '143',
- 'to_port': '143',
- },
- 'ldap': {
- 'name': 'LDAP',
- 'ip_protocol': 'tcp',
- 'from_port': '389',
- 'to_port': '389',
- },
- 'https': {
- 'name': 'HTTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '443',
- 'to_port': '443',
- },
- 'smtps': {
- 'name': 'SMTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '465',
- 'to_port': '465',
- },
- 'imaps': {
- 'name': 'IMAPS',
- 'ip_protocol': 'tcp',
- 'from_port': '993',
- 'to_port': '993',
- },
- 'pop3s': {
- 'name': 'POP3S',
- 'ip_protocol': 'tcp',
- 'from_port': '995',
- 'to_port': '995',
- },
- 'ms_sql': {
- 'name': 'MS SQL',
- 'ip_protocol': 'tcp',
- 'from_port': '1433',
- 'to_port': '1433',
- },
- 'mysql': {
- 'name': 'MYSQL',
- 'ip_protocol': 'tcp',
- 'from_port': '3306',
- 'to_port': '3306',
- },
- 'rdp': {
- 'name': 'RDP',
- 'ip_protocol': 'tcp',
- 'from_port': '3389',
- 'to_port': '3389',
- },
-}
-
-FLAVOR_EXTRA_KEYS = {
- 'flavor_keys': [
- ('quota:read_bytes_sec', _('Quota: Read bytes')),
- ('quota:write_bytes_sec', _('Quota: Write bytes')),
- ('quota:cpu_quota', _('Quota: CPU')),
- ('quota:cpu_period', _('Quota: CPU period')),
- ('quota:inbound_average', _('Quota: Inbound average')),
- ('quota:outbound_average', _('Quota: Outbound average')),
- ]
-}
-
diff --git a/deploy/adapters/ansible/roles/dashboard/templates/local_settings_redhat b/deploy/adapters/ansible/roles/dashboard/templates/local_settings_redhat
deleted file mode 100644
index de1efd7f..00000000
--- a/deploy/adapters/ansible/roles/dashboard/templates/local_settings_redhat
+++ /dev/null
@@ -1,643 +0,0 @@
-import os
-
-from django.utils.translation import ugettext_lazy as _
-
-from openstack_dashboard import exceptions
-
-DEBUG = False
-TEMPLATE_DEBUG = DEBUG
-
-
-# WEBROOT is the location relative to Webserver root
-# should end with a slash.
-WEBROOT = '/dashboard/'
-# LOGIN_URL = WEBROOT + 'auth/login/'
-# LOGOUT_URL = WEBROOT + 'auth/logout/'
-#
-# LOGIN_REDIRECT_URL can be used as an alternative for
-# HORIZON_CONFIG.user_home, if user_home is not set.
-# Do not set it to '/home/', as this will cause a circular redirect loop
-# LOGIN_REDIRECT_URL = WEBROOT
-
-
-# Required for Django 1.5.
-# If horizon is running in production (DEBUG is False), set this
-# with the list of host/domain names that the application can serve.
-# For more information see:
-# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
-ALLOWED_HOSTS = ['*']
-
-# Set SSL proxy settings:
-# For Django 1.4+ pass this header from the proxy after terminating the SSL,
-# and don't forget to strip it from the client's request.
-# For more information see:
-# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
-#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
-
-# If Horizon is being served through SSL, then uncomment the following two
-# settings to better secure the cookies from security exploits
-#CSRF_COOKIE_SECURE = True
-#SESSION_COOKIE_SECURE = True
-
-# Overrides for OpenStack API versions. Use this setting to force the
-# OpenStack dashboard to use a specific API version for a given service API.
-# Versions specified here should be integers or floats, not strings.
-# NOTE: The version should be formatted as it appears in the URL for the
-# service API. For example, The identity service APIs have inconsistent
-# use of the decimal point, so valid options would be 2.0 or 3.
-#OPENSTACK_API_VERSIONS = {
-# "data-processing": 1.1,
-# "identity": 3,
-# "volume": 2,
-#}
-
-# Set this to True if running on multi-domain model. When this is enabled, it
-# will require user to enter the Domain name in addition to username for login.
-#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
-
-# Overrides the default domain used when running on single-domain model
-# with Keystone V3. All entities will be created in the default domain.
-#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
-
-# Set Console type:
-# valid options would be "AUTO" (default), "VNC", "SPICE", "RDP", "SERIAL" or None
-# Set to None explicitly if you want to deactivate the console.
-#CONSOLE_TYPE = "AUTO"
-
-# Default OpenStack Dashboard configuration.
-HORIZON_CONFIG = {
- 'user_home': 'openstack_dashboard.views.get_user_home',
- 'ajax_queue_limit': 10,
- 'auto_fade_alerts': {
- 'delay': 3000,
- 'fade_duration': 1500,
- 'types': ['alert-success', 'alert-info']
- },
- 'help_url': "http://docs.openstack.org",
- 'exceptions': {'recoverable': exceptions.RECOVERABLE,
- 'not_found': exceptions.NOT_FOUND,
- 'unauthorized': exceptions.UNAUTHORIZED},
- 'modal_backdrop': 'static',
- 'angular_modules': [],
- 'js_files': [],
- 'js_spec_files': [],
-}
-
-# Specify a regular expression to validate user passwords.
-#HORIZON_CONFIG["password_validator"] = {
-# "regex": '.*',
-# "help_text": _("Your password does not meet the requirements."),
-#}
-
-# Disable simplified floating IP address management for deployments with
-# multiple floating IP pools or complex network requirements.
-#HORIZON_CONFIG["simple_ip_management"] = False
-
-# Turn off browser autocompletion for forms including the login form and
-# the database creation workflow if so desired.
-#HORIZON_CONFIG["password_autocomplete"] = "off"
-
-# Setting this to True will disable the reveal button for password fields,
-# including on the login form.
-#HORIZON_CONFIG["disable_password_reveal"] = False
-
-
-# Set custom secret key:
-# You can either set it to a specific value or you can let horizon generate a
-# default secret key that is unique on this machine, i.e. regardless of the
-# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
-# may be situations where you would want to set this explicitly, e.g. when
-# multiple dashboard instances are distributed on different machines (usually
-# behind a load-balancer). Either you have to make sure that a session gets all
-# requests routed to the same dashboard instance or you set the same SECRET_KEY
-# for all of them.
-LOCAL_PATH = '/tmp'
-SECRET_KEY='19fb045b203d1168d6d2'
-
-# We recommend you use memcached for development; otherwise after every reload
-# of the django development server, you will have to log in again. To use
-# memcached set CACHES to something like
-#CACHES = {
-# 'default': {
-# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
-# 'LOCATION': '127.0.0.1:11211',
-# }
-#}
-
-CACHES = {
- 'default': {
- 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
- 'LOCATION': '127.0.0.1:11211',
- }
-}
-# Send email to the console by default
-EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
-# Or send them to /dev/null
-#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
-
-# Configure these for your outgoing email host
-#EMAIL_HOST = 'smtp.my-company.com'
-#EMAIL_PORT = 25
-#EMAIL_HOST_USER = 'djangomail'
-#EMAIL_HOST_PASSWORD = 'top-secret!'
-
-# For multiple regions uncomment this configuration, and add (endpoint, title).
-#AVAILABLE_REGIONS = [
-# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
-# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
-#]
-
-OPENSTACK_HOST = '{{ internal_vip.ip }}'
-OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
-OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"
-
-# Enables keystone web single-sign-on if set to True.
-#WEBSSO_ENABLED = False
-
-# Determines which authentication choice to show as default.
-#WEBSSO_INITIAL_CHOICE = "credentials"
-
-# The list of authentication mechanisms
-# which include keystone federation protocols.
-# Current supported protocol IDs are 'saml2' and 'oidc'
-# which represent SAML 2.0, OpenID Connect respectively.
-# Do not remove the mandatory credentials mechanism.
-#WEBSSO_CHOICES = (
-# ("credentials", _("Keystone Credentials")),
-# ("oidc", _("OpenID Connect")),
-# ("saml2", _("Security Assertion Markup Language")))
-
-# Disable SSL certificate checks (useful for self-signed certificates):
-#OPENSTACK_SSL_NO_VERIFY = True
-
-# The CA certificate to use to verify SSL connections
-#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
-
-# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
-# capabilities of the auth backend for Keystone.
-# If Keystone has been configured to use LDAP as the auth backend then set
-# can_edit_user to False and name to 'ldap'.
-#
-# TODO(tres): Remove these once Keystone has an API to identify auth backend.
-OPENSTACK_KEYSTONE_BACKEND = {
- 'name': 'native',
- 'can_edit_user': True,
- 'can_edit_group': True,
- 'can_edit_project': True,
- 'can_edit_domain': True,
- 'can_edit_role': True,
-}
-
-# Setting this to True, will add a new "Retrieve Password" action on instance,
-# allowing Admin session password retrieval/decryption.
-#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
-
-# The Launch Instance user experience has been significantly enhanced.
-# You can choose whether to enable the new launch instance experience,
-# the legacy experience, or both. The legacy experience will be removed
-# in a future release, but is available as a temporary backup setting to ensure
-# compatibility with existing deployments. Further development will not be
-# done on the legacy experience. Please report any problems with the new
-# experience via the Launchpad tracking system.
-#
-# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
-# determine the experience to enable. Set them both to true to enable
-# both.
-#LAUNCH_INSTANCE_LEGACY_ENABLED = True
-#LAUNCH_INSTANCE_NG_ENABLED = False
-
-# The Xen Hypervisor has the ability to set the mount point for volumes
-# attached to instances (other Hypervisors currently do not). Setting
-# can_set_mount_point to True will add the option to set the mount point
-# from the UI.
-OPENSTACK_HYPERVISOR_FEATURES = {
- 'can_set_mount_point': False,
- 'can_set_password': False,
-}
-
-# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
-# services provided by cinder that is not exposed by its extension API.
-OPENSTACK_CINDER_FEATURES = {
- 'enable_backup': False,
-}
-
-# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
-# services provided by neutron. Options currently available are load
-# balancer service, security groups, quotas, VPN service.
-OPENSTACK_NEUTRON_NETWORK = {
- 'enable_router': True,
- 'enable_quotas': True,
- 'enable_ipv6': True,
- 'enable_distributed_router': False,
- 'enable_ha_router': False,
- 'enable_lb': True,
- 'enable_firewall': True,
- 'enable_vpn': True,
-
- # The profile_support option is used to detect if an external router can be
- # configured via the dashboard. When using specific plugins the
- # profile_support can be turned on if needed.
- 'profile_support': None,
- #'profile_support': 'cisco',
-
- # Set which provider network types are supported. Only the network types
- # in this list will be available to choose from when creating a network.
- # Network types include local, flat, vlan, gre, and vxlan.
- 'supported_provider_types': ['*'],
-
- # Set which VNIC types are supported for port binding. Only the VNIC
- # types in this list will be available to choose from when creating a
- # port.
- # VNIC types include 'normal', 'macvtap' and 'direct'.
- 'supported_vnic_types': ['*']
-}
-
-# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
-# in the OpenStack Dashboard related to the Image service, such as the list
-# of supported image formats.
-#OPENSTACK_IMAGE_BACKEND = {
-# 'image_formats': [
-# ('', _('Select format')),
-# ('aki', _('AKI - Amazon Kernel Image')),
-# ('ami', _('AMI - Amazon Machine Image')),
-# ('ari', _('ARI - Amazon Ramdisk Image')),
-# ('iso', _('ISO - Optical Disk Image')),
-# ('ova', _('OVA - Open Virtual Appliance')),
-# ('qcow2', _('QCOW2 - QEMU Emulator')),
-# ('raw', _('Raw')),
-# ('vdi', _('VDI - Virtual Disk Image')),
-# ('vhd', _('VHD - Virtual Hard Disk')),
-# ('vmdk', _('VMDK - Virtual Machine Disk')),
-# ]
-#}
-
-# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
-# image custom property attributes that appear on image detail pages.
-IMAGE_CUSTOM_PROPERTY_TITLES = {
- "architecture": _("Architecture"),
- "kernel_id": _("Kernel ID"),
- "ramdisk_id": _("Ramdisk ID"),
- "image_state": _("Euca2ools state"),
- "project_id": _("Project ID"),
- "image_type": _("Image Type"),
-}
-
-# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
-# custom properties should not be displayed in the Image Custom Properties
-# table.
-IMAGE_RESERVED_CUSTOM_PROPERTIES = []
-
-# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is 'publicURL'.
-#OPENSTACK_ENDPOINT_TYPE = "publicURL"
-
-# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
-# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
-# in the Keystone service catalog. Use this setting when Horizon is running
-# external to the OpenStack environment. The default is None. This
-# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
-#SECONDARY_ENDPOINT_TYPE = "publicURL"
-
-# The number of objects (Swift containers/objects or images) to display
-# on a single page before providing a paging element (a "more" link)
-# to paginate results.
-API_RESULT_LIMIT = 1000
-API_RESULT_PAGE_SIZE = 20
-
-# The size of chunk in bytes for downloading objects from Swift
-SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
-
-# Specify a maximum number of items to display in a dropdown.
-DROPDOWN_MAX_ITEMS = 30
-
-# The timezone of the server. This should correspond with the timezone
-# of your entire OpenStack installation, and hopefully be in UTC.
-TIME_ZONE = "UTC"
-
-# When launching an instance, the menu of available flavors is
-# sorted by RAM usage, ascending. If you would like a different sort order,
-# you can provide another flavor attribute as sorting key. Alternatively, you
-# can provide a custom callback method to use for sorting. You can also provide
-# a flag for reverse sort. For more info, see
-# http://docs.python.org/2/library/functions.html#sorted
-#CREATE_INSTANCE_FLAVOR_SORT = {
-# 'key': 'name',
-# # or
-# 'key': my_awesome_callback_method,
-# 'reverse': False,
-#}
-
-# Set this to True to display an 'Admin Password' field on the Change Password
-# form to verify that it is indeed the admin logged-in who wants to change
-# the password.
-# ENFORCE_PASSWORD_CHECK = False
-
-# Modules that provide /auth routes that can be used to handle different types
-# of user authentication. Add auth plugins that require extra route handling to
-# this list.
-#AUTHENTICATION_URLS = [
-# 'openstack_auth.urls',
-#]
-
-# The Horizon Policy Enforcement engine uses these values to load per service
-# policy rule files. The content of these files should match the files the
-# OpenStack services are using to determine role based access control in the
-# target installation.
-
-# Path to directory containing policy.json files
-# POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
-POLICY_FILES_PATH = '/etc/openstack-dashboard'
-# Map of local copy of service policy files
-#POLICY_FILES = {
-# 'identity': 'keystone_policy.json',
-# 'compute': 'nova_policy.json',
-# 'volume': 'cinder_policy.json',
-# 'image': 'glance_policy.json',
-# 'orchestration': 'heat_policy.json',
-# 'network': 'neutron_policy.json',
-# 'telemetry': 'ceilometer_policy.json',
-#}
-
-# Trove user and database extension support. By default support for
-# creating users and databases on database instances is turned on.
-# To disable these extensions set the permission here to something
-# unusable such as ["!"].
-# TROVE_ADD_USER_PERMS = []
-# TROVE_ADD_DATABASE_PERMS = []
-
-# Change this patch to the appropriate static directory containing
-# two files: _variables.scss and _styles.scss
-#CUSTOM_THEME_PATH = 'static/themes/default'
-
-LOGGING = {
- 'version': 1,
- # When set to True this will disable all logging except
- # for loggers specified in this configuration dictionary. Note that
- # if nothing is specified here and disable_existing_loggers is True,
- # django.db.backends will still log unless it is disabled explicitly.
- 'disable_existing_loggers': False,
- 'handlers': {
- 'null': {
- 'level': 'DEBUG',
- 'class': 'django.utils.log.NullHandler',
- },
- 'console': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'INFO',
- 'class': 'logging.StreamHandler',
- },
- },
- 'loggers': {
- # Logging from django.db.backends is VERY verbose, send to null
- # by default.
- 'django.db.backends': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'requests': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'horizon': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_dashboard': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'novaclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'cinderclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'keystoneclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'glanceclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'neutronclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'heatclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'ceilometerclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'troveclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'swiftclient': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'openstack_auth': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'nose.plugins.manager': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'django': {
- 'handlers': ['console'],
- 'level': 'DEBUG',
- 'propagate': False,
- },
- 'iso8601': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- 'scss': {
- 'handlers': ['null'],
- 'propagate': False,
- },
- }
-}
-
-# 'direction' should not be specified for all_tcp/udp/icmp.
-# It is specified in the form.
-SECURITY_GROUP_RULES = {
- 'all_tcp': {
- 'name': _('All TCP'),
- 'ip_protocol': 'tcp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_udp': {
- 'name': _('All UDP'),
- 'ip_protocol': 'udp',
- 'from_port': '1',
- 'to_port': '65535',
- },
- 'all_icmp': {
- 'name': _('All ICMP'),
- 'ip_protocol': 'icmp',
- 'from_port': '-1',
- 'to_port': '-1',
- },
- 'ssh': {
- 'name': 'SSH',
- 'ip_protocol': 'tcp',
- 'from_port': '22',
- 'to_port': '22',
- },
- 'smtp': {
- 'name': 'SMTP',
- 'ip_protocol': 'tcp',
- 'from_port': '25',
- 'to_port': '25',
- },
- 'dns': {
- 'name': 'DNS',
- 'ip_protocol': 'tcp',
- 'from_port': '53',
- 'to_port': '53',
- },
- 'http': {
- 'name': 'HTTP',
- 'ip_protocol': 'tcp',
- 'from_port': '80',
- 'to_port': '80',
- },
- 'pop3': {
- 'name': 'POP3',
- 'ip_protocol': 'tcp',
- 'from_port': '110',
- 'to_port': '110',
- },
- 'imap': {
- 'name': 'IMAP',
- 'ip_protocol': 'tcp',
- 'from_port': '143',
- 'to_port': '143',
- },
- 'ldap': {
- 'name': 'LDAP',
- 'ip_protocol': 'tcp',
- 'from_port': '389',
- 'to_port': '389',
- },
- 'https': {
- 'name': 'HTTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '443',
- 'to_port': '443',
- },
- 'smtps': {
- 'name': 'SMTPS',
- 'ip_protocol': 'tcp',
- 'from_port': '465',
- 'to_port': '465',
- },
- 'imaps': {
- 'name': 'IMAPS',
- 'ip_protocol': 'tcp',
- 'from_port': '993',
- 'to_port': '993',
- },
- 'pop3s': {
- 'name': 'POP3S',
- 'ip_protocol': 'tcp',
- 'from_port': '995',
- 'to_port': '995',
- },
- 'ms_sql': {
- 'name': 'MS SQL',
- 'ip_protocol': 'tcp',
- 'from_port': '1433',
- 'to_port': '1433',
- },
- 'mysql': {
- 'name': 'MYSQL',
- 'ip_protocol': 'tcp',
- 'from_port': '3306',
- 'to_port': '3306',
- },
- 'rdp': {
- 'name': 'RDP',
- 'ip_protocol': 'tcp',
- 'from_port': '3389',
- 'to_port': '3389',
- },
-}
-
-# Deprecation Notice:
-#
-# The setting FLAVOR_EXTRA_KEYS has been deprecated.
-# Please load extra spec metadata into the Glance Metadata Definition Catalog.
-#
-# The sample quota definitions can be found in:
-# <glance_source>/etc/metadefs/compute-quota.json
-#
-# The metadata definition catalog supports CLI and API:
-# $glance --os-image-api-version 2 help md-namespace-import
-# $glance-manage db_load_metadefs <directory_with_definition_files>
-#
-# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
-
-# Indicate to the Sahara data processing service whether or not
-# automatic floating IP allocation is in effect. If it is not
-# in effect, the user will be prompted to choose a floating IP
-# pool for use in their cluster. False by default. You would want
-# to set this to True if you were running Nova Networking with
-# auto_assign_floating_ip = True.
-#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
-
-# The hash algorithm to use for authentication tokens. This must
-# match the hash algorithm that the identity server and the
-# auth_token middleware are using. Allowed values are the
-# algorithms supported by Python's hashlib library.
-#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
-
-# Hashing tokens from Keystone keeps the Horizon session data smaller, but it
-# doesn't work in some cases when using PKI tokens. Uncomment this value and
-# set it to False if using PKI tokens and there are 401 errors due to token
-# hashing.
-#OPENSTACK_TOKEN_HASH_ENABLED = True
-
-# AngularJS requires some settings to be made available to
-# the client side. Some settings are required by in-tree / built-in horizon
-# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
-# form of ['SETTING_1','SETTING_2'], etc.
-#
-# You may remove settings from this list for security purposes, but do so at
-# the risk of breaking a built-in horizon feature. These settings are required
-# for horizon to function properly. Only remove them if you know what you
-# are doing. These settings may in the future be moved to be defined within
-# the enabled panel configuration.
-# You should not add settings to this list for out of tree extensions.
-# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
-REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES']
-
-# Additional settings can be made available to the client side for
-# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
-# !! Please use extreme caution as the settings are transferred via HTTP/S
-# and are not encrypted on the browser. This is an experimental API and
-# may be deprecated in the future without notice.
-#REST_API_ADDITIONAL_SETTINGS = []
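The template deleted above pins SECRET_KEY to the same literal value on every dashboard node, which is what its own comment block recommends for load-balanced deployments. For a single machine, upstream Horizon also ships a helper that derives and caches a per-host key; a minimal sketch of that alternative, assuming the stock horizon.utils.secret_key module:

    import os
    from horizon.utils import secret_key

    LOCAL_PATH = '/tmp'
    # Generates a key on first use and re-reads it afterwards, so every WSGI
    # worker on this machine shares one key. Multiple machines behind a load
    # balancer still need a fixed shared SECRET_KEY, as in the template above.
    SECRET_KEY = secret_key.generate_or_read_from_file(
        os.path.join(LOCAL_PATH, '.secret_key_store'))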
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml b/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
index 5e84901c..481757bf 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/RedHat.yml
@@ -6,7 +6,6 @@ http_service: httpd
packages:
- mod_wsgi
- - python-memcached
services: []
diff --git a/deploy/adapters/ansible/roles/dashboard/vars/main.yml b/deploy/adapters/ansible/roles/dashboard/vars/main.yml
index 39843a3e..fe9d5223 100644
--- a/deploy/adapters/ansible/roles/dashboard/vars/main.yml
+++ b/deploy/adapters/ansible/roles/dashboard/vars/main.yml
@@ -1,7 +1,5 @@
---
packages_noarch:
- - memcached
- openstack-dashboard
-services_noarch:
- - memcached
+services_noarch: []
diff --git a/deploy/adapters/ansible/roles/database/tasks/main.yml b/deploy/adapters/ansible/roles/database/tasks/main.yml
index ad5c5b71..969ee4d6 100644
--- a/deploy/adapters/ansible/roles/database/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/main.yml
@@ -1,11 +1,16 @@
---
- include_vars: "{{ ansible_os_family }}.yml"
-- include: mysql.yml
- when: haproxy_hosts is not defined
+- include: mariadb_install.yml
+- include: mariadb_cluster.yml
-- include: mariadb.yml
- when: haproxy_hosts is defined
+- include: mariadb_config.yml
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
-- include: mongodb.yml
+- include: mongodb_install.yml
+- include: mongodb_config.yml
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
+- meta: flush_handlers
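Several of the new includes are gated on inventory_hostname == haproxy_hosts.keys()[0], i.e. they run on whichever host Python happens to list first. Under Python 2 that order is arbitrary but stable within a run; a short sketch (hypothetical host names and addresses) of making the choice reproducible instead:

    # Python 2: dict key order is arbitrary, so keys()[0] only means
    # "some fixed host for this run". Sorting pins the choice across runs.
    haproxy_hosts = {'host2': '10.1.0.51', 'host1': '10.1.0.50'}
    first = sorted(haproxy_hosts.keys())[0]  # always 'host1'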
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb.yml
deleted file mode 100644
index 9a9ebda5..00000000
--- a/deploy/adapters/ansible/roles/database/tasks/mariadb.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-#- name: delete default maridb-libs
-# action: "{{ ansible_pkg_mgr }} name=mariadb-libs state=absent"
-# when: ansible_os_family == "RedHat"
-# ignore_errors: True
-
-- name: install python-mysqldb
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: maridb_packages | union(packages_noarch)
-
-- name: create mysql log directory
- file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
-
-- name: update mariadb config file
- template: src={{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
- with_items: mysql_config_file_name
-
-- name: update galera wsrep.cnf
- template: src=wsrep.cnf dest={{ wsrep_config_file_path }}/wsrep.cnf backup=yes
-
-- name: update wsrep_sst_rsync uid
- lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes
-
-- name: update wsrep_sst_rsync gid
- lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$" backup=yes
-
-- stat: path=/opt/mysql_init_complete
- register: mysql_init_complete
-
-- name: restart first mysql server
- shell: service mysql restart --wsrep-cluster-address="gcomm://" && touch /opt/mysql_init_complete
- when: inventory_hostname == haproxy_hosts.keys()[0] and mysql_init_complete.stat.exists == False
- tags:
- - mysql_restart
- #register: result
- #until: result|success
- #retries: 5
- #delay: 5
-
-- name: restart other mysql server
- shell: service mysql restart && touch /opt/mysql_init_complete
- tags:
- - mysql_restart
- when: inventory_hostname != haproxy_hosts.keys()[0] and mysql_init_complete.stat.exists == False
- #register: result
- #until: result|success
- #retries: 5
- #delay: 5
-
-- name: generate mysql service list
- shell: echo {{ item }} >> /opt/service
- with_items: services_noarch
-
-- name: create all needed db
- run_once: yes
- mysql_db: name={{ item.db }} state=present
- with_items: "{{ credentials }}"
-
-- name: create service db user
- run_once: yes
- mysql_user:
- name={{ item[0].user }}
- password={{ item[0].password }}
- priv=*.*:ALL,GRANT
- host={{ item[1] }}
- state=present
- with_nested:
- - "{{ credentials }}"
- - ['%', 'localhost', inventory_hostname]
-
-- name: create wsrep db user
- run_once: yes
- mysql_user:
- name={{ WSREP_SST_USER }}
- password={{ WSREP_SST_PASS }}
- priv=*.*:ALL,GRANT
- host={{ item }}
- state=present
- with_items: ['%', 'localhost', inventory_hostname]
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster.yml
new file mode 100644
index 00000000..69c8997a
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster.yml
@@ -0,0 +1,3 @@
+---
+- include: mariadb_cluster_debian.yml
+ when: ansible_os_family == "Debian"
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
new file mode 100644
index 00000000..e0afcaec
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_cluster_debian.yml
@@ -0,0 +1,33 @@
+---
+- name: start first node to create new cluster
+ service:
+ name: mysql
+ state: restarted
+ enabled: yes
+ args: "--wsrep-new-cluster"
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: wait for cluster to become ready
+ command: mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'
+ register: cluster_status
+ until: cluster_status|success
+ failed_when: not cluster_status.stdout | search("OPERATIONAL")
+ retries: 10
+ delay: 3
+ when:
+ - inventory_hostname == haproxy_hosts.keys()[0]
+
+- name: restart other nodes and join cluster
+ service:
+ name: mysql
+ state: restarted
+ enabled: yes
+ when:
+ - inventory_hostname != haproxy_hosts.keys()[0]
+
+- name: restart first node
+ service:
+ name: mysql
+ state: restarted
+ when: inventory_hostname == haproxy_hosts.keys()[0] and haproxy_hosts|length > 1
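The readiness check above greps SHOW STATUS output for OPERATIONAL before any other node is restarted. On a healthy bootstrap node the probe returns something like this (illustrative output):

    $ mysql --silent --skip-column-names -e 'SHOW STATUS LIKE "wsrep_evs_state"'
    wsrep_evs_state	OPERATIONAL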
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
new file mode 100644
index 00000000..16783ed4
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_config.yml
@@ -0,0 +1,24 @@
+---
+- name: create all needed db
+ mysql_db: name={{ item.db }} state=present
+ with_items: "{{ credentials }}"
+
+- name: create service db user
+ mysql_user:
+ name={{ item[0].user }}
+ password={{ item[0].password }}
+ priv=*.*:ALL,GRANT
+ host={{ item[1] }}
+ state=present
+ with_nested:
+ - "{{ credentials }}"
+ - ['%', 'localhost', inventory_hostname]
+
+- name: create wsrep db user
+ mysql_user:
+ name={{ WSREP_SST_USER }}
+ password={{ WSREP_SST_PASS }}
+ priv=*.*:ALL,GRANT
+ host={{ item }}
+ state=present
+ with_items: ['%', 'localhost', inventory_hostname]
diff --git a/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
new file mode 100644
index 00000000..97e00503
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/tasks/mariadb_install.yml
@@ -0,0 +1,49 @@
+---
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install python-mysqldb
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+ with_items: maridb_packages | union(packages_noarch)
+
+- name: change open file limit
+ copy:
+ content: "* - nofile 65536"
+ dest: "/etc/security/limits.conf"
+ mode: 0755
+
+- name: update mariadb config file
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ backup: yes
+ mode: 0644
+ with_items: mysql_config
+
+- name: bugfix for rsync version 3.1
+ lineinfile:
+ dest: /usr/bin/wsrep_sst_rsync
+ state: absent
+ regexp: '{{ item }}'
+ with_items:
+ - "\\s*uid = \\$MYUID$"
+ - "\\s*gid = \\$MYGID$"
+
+- stat: path=/opt/mysql_init_complete
+ register: mysql_init_complete
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: rm old datafiles
+ shell: rm -rf /var/lib/mysql/ib_logfile*
+
+- name: set owner
+ file: path=/var/lib/mysql owner=mysql group=mysql recurse=yes state=directory
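The limits.conf line written above raises the soft and hard open-file limits to 65536 for all users, matching the open-files-limit set in the reworked my.cnf below. A quick way to confirm the limit took effect after re-login, sketched in Python:

    import resource

    # Both values should read 65536 once the "* - nofile 65536" entry applies.
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    print(soft, hard)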
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml
new file mode 100755
index 00000000..e3a68c55
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb_config.yml
@@ -0,0 +1,47 @@
+---
+- name: copy mongo js
+ template:
+ src: '{{ item.src }}'
+ dest: '{{ item.dest }}'
+ with_items:
+ - src: replica.js
+ dest: /opt/replica.js
+
+- name: init replica servers
+ shell: mongo compass /opt/replica.js
+
+- name: wait until replica servers are ready
+ shell: mongo compass --eval 'printjson(rs.status())'|grep -E 'PRIMARY|SECONDARY'|wc -l
+ register: servers
+ until: servers.stdout|int == {{ haproxy_hosts|length }}
+ retries: 60
+ delay: 10
+
+- debug: msg='{{ servers.stdout |int }}'
+
+- name: wait until a primary is elected
+ shell: mongo compass --eval 'printjson(rs.status())'|grep -E 'PRIMARY'|wc -l
+ register: servers
+ until: servers.stdout|int == 1
+ retries: 60
+ delay: 10
+
+- debug: msg='{{ servers.stdout |int }}'
+
+- name: create mongodb user and db
+ mongodb_user:
+ login_host: "{{ internal_ip }}"
+ database: ceilometer
+ name: ceilometer
+ password: "{{ CEILOMETER_DBPASS }}"
+ roles: 'readWrite,dbAdmin'
+ state: present
+
+- name: grant user privilege
+ mongodb_user:
+ login_host: "{{ internal_ip }}"
+ database: ceilometer
+ name: ceilometer
+ password: "{{ CEILOMETER_DBPASS }}"
+ roles: 'readWrite,dbAdmin'
+ state: present
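The two polling tasks above wait until every member of the replica set reports PRIMARY or SECONDARY and exactly one PRIMARY exists. A rough pymongo equivalent of the same check, assuming the pymongo package this role already installs:

    import time
    from pymongo import MongoClient

    def wait_for_replica_set(host, expected, retries=60, delay=10):
        # Mirrors the two shell probes: all members healthy, one PRIMARY.
        client = MongoClient(host, 27017)
        for _ in range(retries):
            states = [m['stateStr'] for m in
                      client.admin.command('replSetGetStatus')['members']]
            healthy = [s for s in states if s in ('PRIMARY', 'SECONDARY')]
            if len(healthy) == expected and states.count('PRIMARY') == 1:
                return True
            time.sleep(delay)
        return False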
diff --git a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml b/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml
index c46a2932..1e7988d2 100644..100755
--- a/deploy/adapters/ansible/roles/database/tasks/mongodb.yml
+++ b/deploy/adapters/ansible/roles/database/tasks/mongodb_install.yml
@@ -21,16 +21,10 @@
- /var/lib/mongodb/journal/*
- name: manually restart mongodb server
- service: name={{ mongodb_serveice }} state=restarted
+ service: name={{ mongodb_service }} state=restarted enabled=yes
-- wait_for: port=27017 delay=3 timeout=60 host={{ internal_vip.ip }}
+- name: write mongodb to monitor list
+ lineinfile: dest=/opt/service create=yes line={{ mongodb_service }}
-- name: create mongodb user
- run_once: True
- mongodb_user:
- login_host: "{{ internal_vip.ip }}"
- database: ceilometer
- name: ceilometer
- password: "{{ CEILOMETER_DBPASS }}"
- roles: 'readWrite,dbAdmin'
- state: present
+- name: wait for mongod ready
+ wait_for: host=0.0.0.0 port=27017 delay=10
diff --git a/deploy/adapters/ansible/roles/database/tasks/mysql.yml b/deploy/adapters/ansible/roles/database/tasks/mysql.yml
deleted file mode 100644
index 9e272d1b..00000000
--- a/deploy/adapters/ansible/roles/database/tasks/mysql.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: install mysql client and server packages
- apt: name={{ item }} state=present
- with_items: mysql_packages
-
-- name: create mysql log directory
- file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755
-
-- name: update mysql config file
- copy: src={{ item }} dest={{ mysql_config_file_path }}/{{ item }} backup=yes
- with_items: mysql_config_file_name
-
-- name: manually restart mysql server
- shell: service mysql restart
-
-- name: create database/user
- shell: /opt/data.sh
- tags:
- - mysql_user
diff --git a/deploy/adapters/ansible/roles/database/templates/mongodb.conf b/deploy/adapters/ansible/roles/database/templates/mongodb.conf
index 68b84d7a..18306154 100644
--- a/deploy/adapters/ansible/roles/database/templates/mongodb.conf
+++ b/deploy/adapters/ansible/roles/database/templates/mongodb.conf
@@ -1,4 +1,5 @@
# mongodb.conf
-bind_ip = {{ internal_vip.ip }}
+bind_ip = 0.0.0.0
+replSet = compass
port = 27017
smallfiles = true
diff --git a/deploy/adapters/ansible/roles/database/templates/my-huge.cnf b/deploy/adapters/ansible/roles/database/templates/my-huge.cnf
deleted file mode 100644
index c561858c..00000000
--- a/deploy/adapters/ansible/roles/database/templates/my-huge.cnf
+++ /dev/null
@@ -1,151 +0,0 @@
-# Example MariaDB config file for very large systems.
-#
-# This is for a large system with memory of 1G-2G where the system runs mainly
-# MariaDB.
-#
-# MariaDB programs look for option files in a set of
-# locations which depend on the deployment platform.
-# You can copy this option file to one of those
-# locations. For information about these locations, do:
-# 'my_print_defaults --help' and see what is printed under
-# Default options are read from the following files in the given order:
-# More information at: http://dev.mysql.com/doc/mysql/en/option-files.html
-#
-# In this file, you can use all long options that a program supports.
-# If you want to know which options a program supports, run the program
-# with the "--help" option.
-
-# The following options will be passed to all MySQL clients
-[client]
-#password = your_password
-port = 3306
-socket = /var/lib/mysql/mysql.sock
-
-# Here follows entries for some specific programs
-
-# The MySQL server
-[mysqld]
-port = 3306
-socket = /var/lib/mysql/mysql.sock
-skip-external-locking
-key_buffer_size = 384M
-max_allowed_packet = 1M
-table_open_cache = 512
-sort_buffer_size = 2M
-read_buffer_size = 2M
-read_rnd_buffer_size = 8M
-myisam_sort_buffer_size = 64M
-thread_cache_size = 8
-query_cache_size = 32M
-# Try number of CPU's*2 for thread_concurrency
-thread_concurrency = 8
-
-# Point the following paths to a dedicated disk
-#tmpdir = /tmp/
-
-# Don't listen on a TCP/IP port at all. This can be a security enhancement,
-# if all processes that need to connect to mysqld run on the same host.
-# All interaction with mysqld must be made via Unix sockets or named pipes.
-# Note that using this option without enabling named pipes on Windows
-# (via the "enable-named-pipe" option) will render mysqld useless!
-#
-#skip-networking
-
-# Replication Master Server (default)
-# binary logging is required for replication
-log-bin=mysql-bin
-
-# required unique id between 1 and 2^32 - 1
-# defaults to 1 if master-host is not set
-# but will not function as a master if omitted
-server-id = 1
-
-# Replication Slave (comment out master section to use this)
-#
-# To configure this host as a replication slave, you can choose between
-# two methods :
-#
-# 1) Use the CHANGE MASTER TO command (fully described in our manual) -
-# the syntax is:
-#
-# CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,
-# MASTER_USER=<user>, MASTER_PASSWORD=<password> ;
-#
-# where you replace <host>, <user>, <password> by quoted strings and
-# <port> by the master's port number (3306 by default).
-#
-# Example:
-#
-# CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,
-# MASTER_USER='joe', MASTER_PASSWORD='secret';
-#
-# OR
-#
-# 2) Set the variables below. However, in case you choose this method, then
-# start replication for the first time (even unsuccessfully, for example
-# if you mistyped the password in master-password and the slave fails to
-# connect), the slave will create a master.info file, and any later
-# change in this file to the variables' values below will be ignored and
-# overridden by the content of the master.info file, unless you shutdown
-# the slave server, delete master.info and restart the slave server.
-# For that reason, you may want to leave the lines below untouched
-# (commented) and instead use CHANGE MASTER TO (see above)
-#
-# required unique id between 2 and 2^32 - 1
-# (and different from the master)
-# defaults to 2 if master-host is set
-# but will not function as a slave if omitted
-#server-id = 2
-#
-# The replication master for this slave - required
-#master-host = <hostname>
-#
-# The username the slave will use for authentication when connecting
-# to the master - required
-#master-user = <username>
-#
-# The password the slave will authenticate with when connecting to
-# the master - required
-#master-password = <password>
-#
-# The port the master is listening on.
-# optional - defaults to 3306
-#master-port = <port>
-#
-# binary logging - not required for slaves, but recommended
-#log-bin=mysql-bin
-#
-# binary logging format - mixed recommended
-#binlog_format=mixed
-
-# Uncomment the following if you are using InnoDB tables
-#innodb_data_home_dir = /var/lib/mysql
-#innodb_data_file_path = ibdata1:2000M;ibdata2:10M:autoextend
-#innodb_log_group_home_dir = /var/lib/mysql
-# You can set .._buffer_pool_size up to 50 - 80 %
-# of RAM but beware of setting memory usage too high
-#innodb_buffer_pool_size = 384M
-#innodb_additional_mem_pool_size = 20M
-# Set .._log_file_size to 25 % of buffer pool size
-#innodb_log_file_size = 100M
-#innodb_log_buffer_size = 8M
-#innodb_flush_log_at_trx_commit = 1
-#innodb_lock_wait_timeout = 50
-
-[mysqldump]
-quick
-max_allowed_packet = 16M
-
-[mysql]
-no-auto-rehash
-# Remove the next comment character if you are not familiar with SQL
-#safe-updates
-
-[myisamchk]
-key_buffer_size = 256M
-sort_buffer_size = 256M
-read_buffer = 2M
-write_buffer = 2M
-
-[mysqlhotcopy]
-interactive-timeout
diff --git a/deploy/adapters/ansible/roles/database/templates/my.cnf b/deploy/adapters/ansible/roles/database/templates/my.cnf
index 2023185d..b8016849 100644
--- a/deploy/adapters/ansible/roles/database/templates/my.cnf
+++ b/deploy/adapters/ansible/roles/database/templates/my.cnf
@@ -1,133 +1,59 @@
-#
-# The MySQL database server configuration file.
-#
-# You can copy this to one of:
-# - "/etc/mysql/my.cnf" to set global options,
-# - "~/.my.cnf" to set user-specific options.
-#
-# One can use all long options that the program supports.
-# Run program with --help to get a list of available options and with
-# --print-defaults to see which it would actually understand and use.
-#
-# For explanations see
-# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
-
-# This will be passed to all mysql clients
-# It has been reported that passwords should be enclosed with ticks/quotes
-# especially if they contain "#" chars...
-# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
-port = 3306
-socket = /var/run/mysqld/mysqld.sock
+port = 3306
+socket = /var/run/mysqld/mysqld.sock
-# Here is entries for some specific programs
-# The following values assume you have at least 32M ram
-# This was formally known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
-socket = /var/run/mysqld/mysqld.sock
-nice = 0
+socket = /var/run/mysqld/mysqld.sock
+nice = 0
+
+
+[mysql]
+default-character-set = utf8
[mysqld]
-#
-# * Basic Settings
-#
-user = mysql
-pid-file = /var/run/mysqld/mysqld.pid
-socket = /var/run/mysqld/mysqld.sock
-port = 3306
-basedir = /usr
-datadir = /var/lib/mysql
-tmpdir = /tmp
-lc-messages-dir = /usr/share/mysql
-skip-external-locking
-skip-name-resolve
-#
-# Instead of skip-networking the default is now to listen only on
-# localhost which is more compatible and is not less secure.
-bind-address = {{ internal_vip.ip }}
-#
-# * Fine Tuning
-#
-key_buffer = 16M
-max_allowed_packet = 16M
-thread_stack = 192K
-thread_cache_size = 8
-# This replaces the startup script and checks MyISAM tables if needed
-# the first time they are touched
-myisam-recover = BACKUP
-max_connections = 2000
-max_connect_errors = 8000
-#table_cache = 64
-#thread_concurrency = 10
-#
-# * Query Cache Configuration
-#
-query_cache_limit = 1M
-query_cache_size = 16M
-#
-# * Logging and Replication
-#
-# Both location gets rotated by the cronjob.
-# Be aware that this log type is a performance killer.
-# As of 5.1 you can enable the log at runtime!
-general_log_file = /var/log/mysql/mysql.log
-#general_log = 1
-#
-# Error log - should be very few entries.
-#
-log_error = /var/log/mysql/error.log
-#
-# Here you can see queries with especially long duration
-#log_slow_queries = /var/log/mysql/mysql-slow.log
-#long_query_time = 2
-#log-queries-not-using-indexes
-#
-# The following can be used as easy to replay backup logs or for replication.
-# note: if you are setting up a replication slave, see README.Debian about
-# other settings you may need to change.
-#server-id = 1
-#log_bin = /var/log/mysql/mysql-bin.log
-expire_logs_days = 10
-max_binlog_size = 100M
-#binlog_do_db = include_database_name
-#binlog_ignore_db = include_database_name
-#
-# * InnoDB
-#
-# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
-# Read the manual for more InnoDB related options. There are many!
-#
-# * Security Features
-#
-# Read the manual, too, if you want chroot!
-# chroot = /var/lib/mysql/
-#
-# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
-#
-# ssl-ca=/etc/mysql/cacert.pem
-# ssl-cert=/etc/mysql/server-cert.pem
-# ssl-key=/etc/mysql/server-key.pem
-default-storage-engine = innodb
-innodb_file_per_table
-collation-server = utf8_general_ci
+user = mysql
+collation-server = utf8_unicode_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
+datadir = /var/lib/mysql
+bind-address = {{ internal_ip }}
+
+max-allowed-packet = 16M
+max-connect-errors = 1000000
+
+max_connections = {{ ansible_processor_vcpus * 100 }}
+
+wait_timeout = 28800
+tmp-table-size = 32M
+max-heap-table-size = 32M
+query-cache-type = 0
+query-cache-size = 0M
+thread-cache-size = 50
+open-files-limit = 65535
+table-definition-cache = 4096
+table-open-cache = 10240
+
+innodb-flush-method = O_DIRECT
+innodb-additional-mem-pool-size = 24M
+innodb-log-file-size = 1024M
+innodb-file-per-table = 1
+innodb-buffer-pool-size = 4096M
+
+innodb-read-io-threads = 4
+innodb-write-io-threads = 4
+innodb-doublewrite = 1
+innodb-log-buffer-size = 1024M
+innodb-buffer-pool-instances = 8
+innodb-log-files-in-group = 2
+innodb-thread-concurrency = {{ ansible_processor_vcpus * 2 }}
+
+innodb_stats_on_metadata = 0
[mysqldump]
quick
quote-names
-max_allowed_packet = 16M
+max_allowed_packet = 16M
-[mysql]
-#no-auto-rehash # faster start of mysql but no tab completion
-
-[isamchk]
-key_buffer = 16M
-#
-# * IMPORTANT: Additional settings that can override those from this file!
-# The files must end with '.cnf', otherwise they'll be ignored.
-#
!includedir /etc/mysql/conf.d/
-
diff --git a/deploy/adapters/ansible/roles/database/templates/replica.js b/deploy/adapters/ansible/roles/database/templates/replica.js
new file mode 100644
index 00000000..d19db50d
--- /dev/null
+++ b/deploy/adapters/ansible/roles/database/templates/replica.js
@@ -0,0 +1,8 @@
+config = { _id:"compass", members:[
+{% for host in haproxy_hosts.values() %}
+{% set pair = '%s:27017' % host %}
+ {_id:{{ loop.index0 }},host:"{{ pair }}",priority:{{ haproxy_hosts|length - loop.index0 }}},
+{% endfor %}
+]
+};
+rs.initiate(config);
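For a hypothetical three-node cluster the template renders to the mongo shell script below; priorities descend with inventory order, so the first host is preferred as PRIMARY:

    config = { _id:"compass", members:[
      {_id:0,host:"10.1.0.50:27017",priority:3},
      {_id:1,host:"10.1.0.51:27017",priority:2},
      {_id:2,host:"10.1.0.52:27017",priority:1},
    ]
    };
    rs.initiate(config);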
diff --git a/deploy/adapters/ansible/roles/database/templates/server.cnf b/deploy/adapters/ansible/roles/database/templates/server.cnf
deleted file mode 100644
index 57441ddf..00000000
--- a/deploy/adapters/ansible/roles/database/templates/server.cnf
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# These groups are read by MariaDB server.
-# Use it for options that only the server (but not clients) should see
-#
-# See the examples of server my.cnf files in /usr/share/mysql/
-#
-
-# this is read by the standalone daemon and embedded servers
-[server]
-
-# this is only for the mysqld standalone daemon
-[mysqld]
-log_error = /var/log/mysql/error.log
-max_connections = 2000
-max_connect_errors = 8000
-skip-host-cache
-skip-name-resolve
-bind-address = {{ internal_vip.ip }}
-#
-# * Galera-related settings
-#
-[galera]
-# Mandatory settings
-#wsrep_provider=
-#wsrep_cluster_address=
-#binlog_format=row
-#default_storage_engine=InnoDB
-#innodb_autoinc_lock_mode=2
-#bind-address=0.0.0.0
-#
-# Optional setting
-#wsrep_slave_threads=1
-#innodb_flush_log_at_trx_commit=0
-
-# this is only for embedded server
-[embedded]
-
-# This group is only read by MariaDB servers, not by MySQL.
-# If you use the same .cnf file for MySQL and MariaDB,
-# you can put MariaDB-only options here
-[mariadb]
-
-# This group is only read by MariaDB-10.0 servers.
-# If you use the same .cnf file for MariaDB of different versions,
-# use this group for options that older servers don't understand
-[mariadb-10.0]
-
diff --git a/deploy/adapters/ansible/roles/database/templates/wsrep.cnf b/deploy/adapters/ansible/roles/database/templates/wsrep.cnf
index 197640c9..4dc6fb7b 100644
--- a/deploy/adapters/ansible/roles/database/templates/wsrep.cnf
+++ b/deploy/adapters/ansible/roles/database/templates/wsrep.cnf
@@ -1,37 +1,13 @@
-# This file contains wsrep-related mysqld options. It should be included
-# in the main MySQL configuration file.
-#
-# Options that need to be customized:
-# - wsrep_provider
-# - wsrep_cluster_address
-# - wsrep_sst_auth
-# The rest of defaults should work out of the box.
-
-##
-## mysqld options _MANDATORY_ for correct operation of the cluster
-##
[mysqld]
-
-# (This must be substituted by wsrep_format)
binlog_format=ROW
-
-# Currently only InnoDB storage engine is supported
-default-storage-engine=innodb
-
-# to avoid issues with 'bulk mode inserts' using autoinc
innodb_autoinc_lock_mode=2
-
-# This is a must for parallel applying
innodb_locks_unsafe_for_binlog=1
# Query Cache is not supported with wsrep
query_cache_size=0
query_cache_type=0
-# Override bind-address
-# In some systems bind-address defaults to 127.0.0.1, and with mysqldump SST
-# it will have (most likely) disastrous consequences on donor node
-bind-address={{ internal_vip.ip }}
+default_storage_engine = InnoDB
##
## WSREP options
@@ -41,13 +17,17 @@ bind-address={{ internal_vip.ip }}
wsrep_provider={{ wsrep_provider_file }}
# Provider specific configuration options
-#wsrep_provider_options=
+wsrep_provider_options="gcache.size=1024M"
# Logical cluster name. Should be the same for all nodes.
wsrep_cluster_name="my_wsrep_cluster"
# Group communication system handle
+{% if haproxy_hosts|length == 1 %}
+wsrep_cluster_address=gcomm://
+{% else %}
wsrep_cluster_address=gcomm://{{ haproxy_hosts.values()|join(",") }}
+{% endif %}
# Human-readable node name (non-unique). Hostname by default.
#wsrep_node_name=
@@ -61,7 +41,7 @@ wsrep_node_address={{ internal_ip }}
#wsrep_node_incoming_address=
# How many threads will process writesets from other nodes
-wsrep_slave_threads=1
+wsrep_slave_threads={{ ansible_processor_vcpus }}
# DBUG options for wsrep provider
#wsrep_dbug_option
@@ -83,7 +63,7 @@ wsrep_debug=1
wsrep_convert_LOCK_to_trx=0
# how many times to retry deadlocked autocommits
-wsrep_retry_autocommit=1
+wsrep_retry_autocommit=3
# change auto_increment_increment and auto_increment_offset automatically
wsrep_auto_increment_control=1
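The cluster-address branch added above bootstraps a fresh Galera cluster when only one haproxy host exists (an empty gcomm:// URL) and joins the existing members otherwise. Rendered examples with illustrative addresses:

    # one haproxy host
    wsrep_cluster_address=gcomm://
    # three haproxy hosts
    wsrep_cluster_address=gcomm://10.1.0.50,10.1.0.51,10.1.0.52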
diff --git a/deploy/adapters/ansible/roles/database/vars/Debian.yml b/deploy/adapters/ansible/roles/database/vars/Debian.yml
index 7035c2bf..66480ebb 100644
--- a/deploy/adapters/ansible/roles/database/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/database/vars/Debian.yml
@@ -19,9 +19,11 @@ pip_packages: []
services: []
-mongodb_serveice: mongodb
+mongodb_service: mongodb
+mysql_config:
+ - dest: /etc/mysql/my.cnf
+ src: my.cnf
+ - dest: /etc/mysql/conf.d/wsrep.cnf
+ src: wsrep.cnf
-mysql_config_file_path: "/etc/mysql"
-mysql_config_file_name: ["my.cnf"]
-wsrep_config_file_path: "/etc/mysql/conf.d"
wsrep_provider_file: "/usr/lib/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/roles/database/vars/RedHat.yml b/deploy/adapters/ansible/roles/database/vars/RedHat.yml
index ac2f2f22..b8a5dd21 100644
--- a/deploy/adapters/ansible/roles/database/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/database/vars/RedHat.yml
@@ -11,17 +11,20 @@ maridb_packages:
- MySQL-python
mongodb_packages:
- - mongo-10gen-server
- - mongo-10gen
+ - mongo-10gen-server
+ - mongo-10gen
pip_packages:
- - pymongo
+ - pymongo
services: []
-mongodb_serveice: mongod
+mongodb_service: mongod
+
+mysql_config:
+ - dest: /etc/mysql/my.cnf
+ src: my.cnf
+ - dest: /etc/mysql/conf.d/wsrep.cnf
+ src: wsrep.cnf
-mysql_config_file_path: "/etc/my.cnf.d"
-mysql_config_file_name: ["my-huge.cnf", "server.cnf"]
-wsrep_config_file_path: "/etc/my.cnf.d"
wsrep_provider_file: "/usr/lib64/galera/libgalera_smm.so"
diff --git a/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml b/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
index 1a34c6f5..bd6b582b 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/glance_install.yml
@@ -4,7 +4,7 @@
with_items: packages | union(packages_noarch)
- name: generate glance service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: update glance conf
diff --git a/deploy/adapters/ansible/roles/glance/tasks/main.yml b/deploy/adapters/ansible/roles/glance/tasks/main.yml
index f5ec3c79..4759fdf4 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/main.yml
@@ -17,3 +17,5 @@
- config
- glance_config
- glance
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
index ff96c64a..758ec082 100644
--- a/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
+++ b/deploy/adapters/ansible/roles/glance/tasks/nfs.yml
@@ -1,19 +1,22 @@
---
-- name: install packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: nfs_packages
-
- name: install nfs
- local_action: yum name=nfs-utils state=present
+ local_action: yum name={{ item }} state=present
+ with_items:
+ - rpcbind
+ - nfs-utils
run_once: True
- name: create image directory
local_action: file path=/opt/images state=directory mode=0777
run_once: True
+- name: remove nfs config item if exist
+ local_action: lineinfile dest=/etc/exports state=absent
+ regexp="^/opt/images"
+ run_once: True
+
- name: update nfs config
local_action: lineinfile dest=/etc/exports state=present
- regexp="/opt/images *(rw,insecure,sync,all_squash)"
line="/opt/images *(rw,insecure,sync,all_squash)"
run_once: True
@@ -21,7 +24,7 @@
local_action: service name={{ item }} state=restarted enabled=yes
with_items:
- rpcbind
- - nfs
+ - nfs-server
run_once: True
- name: get mount info
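
Note: the remove-then-add pair keeps /etc/exports from accumulating stale `/opt/images` lines, but `lineinfile` can do the same in one idempotent step when the regexp matches the managed line; a possible consolidation under the same local_action/run_once conventions:

    - name: update nfs config
      local_action: lineinfile dest=/etc/exports state=present
                    regexp="^/opt/images" line="/opt/images *(rw,insecure,sync,all_squash)"
      run_once: True
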
diff --git a/deploy/adapters/ansible/roles/glance/vars/Debian.yml b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
index 974ada23..5cee8f90 100644
--- a/deploy/adapters/ansible/roles/glance/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/Debian.yml
@@ -1,12 +1,10 @@
---
-
packages:
- glance
-
-nfs_packages:
- nfs-common
nfs_services: []
+
services:
- glance-registry
- glance-api
diff --git a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
index bd80d21d..fbf77833 100644
--- a/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
+++ b/deploy/adapters/ansible/roles/glance/vars/RedHat.yml
@@ -2,10 +2,6 @@
packages:
- openstack-glance
-nfs_packages:
- - nfs-utils
- - rpcbind
-
nfs_services:
- rpcbind
- rpc-statd
diff --git a/deploy/adapters/ansible/roles/ha/tasks/main.yml b/deploy/adapters/ansible/roles/ha/tasks/main.yml
index 668f6847..8eb9f1b7 100644
--- a/deploy/adapters/ansible/roles/ha/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/ha/tasks/main.yml
@@ -6,7 +6,7 @@
with_items: packages | union(packages_noarch)
- name: generate ha service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: install pexpect
@@ -84,3 +84,5 @@
- name: restart rsyslog
shell: service rsyslog restart
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
index 6b91a248..edbd998d 100644
--- a/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
+++ b/deploy/adapters/ansible/roles/ha/templates/haproxy.cfg
@@ -25,6 +25,32 @@ defaults
timeout check 10s
retries 3
+listen proxy-mysql
+ bind {{ internal_vip.ip }}:3306
+ option tcpka
+ option tcplog
+ balance source
+{% for host, ip in haproxy_hosts.items() %}
+{% if loop.index == 1 %}
+ server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
+{% else %}
+ server {{ host }} {{ ip }}:3306 weight 1 check inter 2000 rise 2 fall 5
+{% endif %}
+{% endfor %}
+
+listen proxy-rabbit
+ bind {{ internal_vip.ip }}:5672
+ bind {{ public_vip.ip }}:5672
+
+ option tcpka
+ option tcplog
+ timeout client 3h
+ timeout server 3h
+ balance source
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
+{% endfor %}
+
listen proxy-glance_registry_cluster
bind {{ internal_vip.ip }}:9191
bind {{ public_vip.ip }}:9191
@@ -134,7 +160,7 @@ listen proxy-ceilometer_api_cluster
bind {{ internal_vip.ip }}:8777
bind {{ public_vip.ip }}:8777
mode tcp
- option httpchk
+ option tcp-check
option tcplog
balance source
{% for host,ip in haproxy_hosts.items() %}
@@ -143,12 +169,16 @@ listen proxy-ceilometer_api_cluster
listen proxy-dashboarad
bind {{ public_vip.ip }}:80
- option tcpka
- option httpchk
- option tcplog
- balance source
+ mode http
+ balance source
+ capture cookie vgnvisitor= len 32
+ cookie SERVERID insert indirect nocache
+ option forwardfor
+ option httpchk
+ option httpclose
+ rspidel ^Set-cookie:\ IP=
{% for host,ip in haproxy_hosts.items() %}
- server {{ host }} {{ ip }}:80 weight 1 check inter 2000 rise 2 fall 5
+ server {{ host }} {{ ip }}:80 cookie {{ host }} weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
listen stats
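
Note: two health-check fixes above are worth calling out. The ceilometer API serves no unauthenticated GET /, so `option httpchk` was marking healthy backends down; `option tcp-check` only verifies the port accepts connections. The dashboard listener now runs in HTTP mode with a SERVERID cookie so a user's session stays pinned to one Horizon backend. A hypothetical smoke test for the dashboard VIP, runnable from the deploy host (assumes Horizon answers 200 at /; adjust if it redirects):

    - name: verify dashboard VIP answers over http (hypothetical check)
      local_action: shell curl -s -o /dev/null -w "%{http_code}" http://{{ public_vip.ip }}/
      register: horizon_code
      failed_when: horizon_code.stdout != "200"
      run_once: True
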
diff --git a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
index ab23f873..32d8f42e 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/heat_install.yml
@@ -6,7 +6,7 @@
with_items: packages | union(packages_noarch)
- name: generate heat service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: update heat conf
diff --git a/deploy/adapters/ansible/roles/heat/tasks/main.yml b/deploy/adapters/ansible/roles/heat/tasks/main.yml
index cf259eb5..d0823d1e 100644
--- a/deploy/adapters/ansible/roles/heat/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/heat/tasks/main.yml
@@ -12,3 +12,4 @@
- heat_config
- heat
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/heat/templates/heat.j2 b/deploy/adapters/ansible/roles/heat/templates/heat.j2
index 67beb1ca..aec6b2eb 100644
--- a/deploy/adapters/ansible/roles/heat/templates/heat.j2
+++ b/deploy/adapters/ansible/roles/heat/templates/heat.j2
@@ -9,6 +9,9 @@ log_dir = /var/log/heat
[database]
connection = mysql://heat:{{ HEAT_DBPASS }}@{{ db_host }}/heat
+idle_timeout = 30
+use_db_reconnect = True
+pool_timeout = 10
[ec2authtoken]
auth_uri = http://{{ internal_vip.ip }}:5000/v2.0
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
index e4488016..172d356e 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/keystone_install.yml
@@ -6,7 +6,7 @@
with_items: packages | union(packages_noarch)
- name: generate keystone service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: update keystone conf
diff --git a/deploy/adapters/ansible/roles/keystone/tasks/main.yml b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
index aa3ff1d5..f084a38e 100644
--- a/deploy/adapters/ansible/roles/keystone/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/tasks/main.yml
@@ -11,3 +11,5 @@
- config
- keystone_config
- keystone
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
index 544fe31d..0ac21a41 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
+++ b/deploy/adapters/ansible/roles/keystone/templates/admin-openrc.sh
@@ -2,5 +2,6 @@
export OS_PASSWORD={{ ADMIN_PASS }}
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0
-export OS_USERNAME=ADMIN
+export OS_USERNAME=admin
+export OS_VOLUME_API_VERSION=2
diff --git a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
index b022a084..22173e44 100644
--- a/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
+++ b/deploy/adapters/ansible/roles/keystone/templates/keystone.conf
@@ -1,3 +1,8 @@
+{% set memcached_servers = [] %}
+{% for host in haproxy_hosts.values() %}
+{% set _ = memcached_servers.append('%s:11211'% host) %}
+{% endfor %}
+{% set memcached_servers = memcached_servers|join(',') %}
[DEFAULT]
admin_token={{ ADMIN_TOKEN }}
@@ -518,8 +523,8 @@ log_dir = /var/log/keystone
# (dogpile.cache.redis) be used in production deployments.
# Small workloads (single process) like devstack can use the
# dogpile.cache.memory backend. (string value)
-#backend=keystone.common.cache.noop
-
+backend=keystone.cache.memcache_pool
+memcache_servers={{ memcached_servers }}
# Use a key-mangling function (sha1) to ensure fixed length
# cache-keys. This is toggle-able for debugging purposes, it
# is highly recommended to always leave this set to True.
@@ -540,7 +545,7 @@ log_dir = /var/log/keystone
# Global toggle for all caching using the should_cache_fn
# mechanism. (boolean value)
-#enabled=false
+enabled=true
# Extra debugging from the cache backend (cache keys,
# get/set/delete/etc calls) This is only really useful if you
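
Note: the Jinja preamble added to keystone.conf builds a comma-separated `host:11211` list from the HAProxy host map before the [DEFAULT] section. The same value could be computed once as a fact; a minimal equivalent sketch, assuming haproxy_hosts has at least one entry:

    - name: build memcached server list (equivalent to the template-side loop)
      set_fact:
        memcached_servers: "{{ haproxy_hosts.values() | join(':11211,') }}:11211"
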
diff --git a/deploy/adapters/ansible/roles/keystone/vars/main.yml b/deploy/adapters/ansible/roles/keystone/vars/main.yml
index d743b4ee..73582252 100644
--- a/deploy/adapters/ansible/roles/keystone/vars/main.yml
+++ b/deploy/adapters/ansible/roles/keystone/vars/main.yml
@@ -40,9 +40,9 @@ os_services:
type: metering
region: regionOne
description: "OpenStack Telemetry"
- publicurl: "http://{{ public_vip.ip }}:8777/v2.0"
- internalurl: "http://{{ internal_vip.ip }}:8777/v2.0"
- adminurl: "http://{{ internal_vip.ip }}:8777/v2.0"
+ publicurl: "http://{{ public_vip.ip }}:8777"
+ internalurl: "http://{{ internal_vip.ip }}:8777"
+ adminurl: "http://{{ internal_vip.ip }}:8777"
- name: cinder
type: volume
@@ -99,7 +99,7 @@ os_users:
tenant_description: "Service Tenant"
- user: keystone
- password: "{{ keystone_PASS }}"
+ password: "{{ KEYSTONE_PASS }}"
email: keystone@admin.com
role: admin
tenant: service
diff --git a/deploy/adapters/ansible/roles/memcached/tasks/main.yml b/deploy/adapters/ansible/roles/memcached/tasks/main.yml
new file mode 100644
index 00000000..58a7ae3e
--- /dev/null
+++ b/deploy/adapters/ansible/roles/memcached/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- include_vars: "{{ ansible_os_family }}.yml"
+
+- name: install packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
+ with_items: packages | union(packages_noarch)
+
+- name: change memcache listen ip
+ lineinfile: dest=/etc/memcached.conf regexp="^-l " line="-l 0.0.0.0"
+
+- name: restart services
+ service: name={{ item }} state=restarted enabled=yes
+ with_items: services| union(services_noarch)
+
+- meta: flush_handlers
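
Note: binding memcached to 0.0.0.0 lets every controller's keystone reach it via the host addresses collected above, but it also exposes the cache on all interfaces; a variant pinned to the management address, assuming the `internal_ip` variable used elsewhere in these roles:

    - name: change memcache listen ip
      lineinfile: dest=/etc/memcached.conf regexp="^-l " line="-l {{ internal_ip }}"
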
diff --git a/deploy/adapters/ansible/roles/memcached/vars/Debian.yml b/deploy/adapters/ansible/roles/memcached/vars/Debian.yml
new file mode 100644
index 00000000..7a0d09de
--- /dev/null
+++ b/deploy/adapters/ansible/roles/memcached/vars/Debian.yml
@@ -0,0 +1,7 @@
+---
+packages:
+ - python-memcache
+
+services: []
+
+
diff --git a/deploy/adapters/ansible/roles/memcached/vars/RedHat.yml b/deploy/adapters/ansible/roles/memcached/vars/RedHat.yml
new file mode 100644
index 00000000..8cd2462c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/memcached/vars/RedHat.yml
@@ -0,0 +1,7 @@
+---
+packages:
+ - python-memcached
+
+pip_packages: []
+
+services: []
diff --git a/deploy/adapters/ansible/roles/memcached/vars/main.yml b/deploy/adapters/ansible/roles/memcached/vars/main.yml
new file mode 100644
index 00000000..40c400a6
--- /dev/null
+++ b/deploy/adapters/ansible/roles/memcached/vars/main.yml
@@ -0,0 +1,6 @@
+---
+packages_noarch:
+ - memcached
+
+services_noarch:
+ - memcached
diff --git a/deploy/adapters/ansible/roles/monitor/tasks/main.yml b/deploy/adapters/ansible/roles/monitor/tasks/main.yml
index 6e446944..1cc21fe0 100644
--- a/deploy/adapters/ansible/roles/monitor/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/monitor/tasks/main.yml
@@ -10,4 +10,5 @@
- name: restart cron
service: name={{ cron }} state=restarted
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/mq/tasks/main.yml b/deploy/adapters/ansible/roles/mq/tasks/main.yml
index 47e41a25..521f5995 100644
--- a/deploy/adapters/ansible/roles/mq/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/mq/tasks/main.yml
@@ -1,6 +1,9 @@
---
- include_vars: "{{ ansible_os_family }}.yml"
-- include: rabbitmq.yml
-#- include: rabbitmq_cluster.yml
-# when: HA_CLUSTER is defined
+- include: rabbitmq_install.yml
+
+- include: rabbitmq_config.yml
+ when: inventory_hostname == haproxy_hosts.keys()[0]
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq.yml
deleted file mode 100644
index edfc8a7a..00000000
--- a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- name: create rabbitmq directory
- file: path=/etc/rabbitmq state=directory mode=0755
-
-- name: copy rabbitmq config file
- template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755
-
-- name: install rabbitmq-server
- action: "{{ ansible_pkg_mgr }} name=rabbitmq-server state=present"
- with_items: packages | union(packages_noarch)
-
-- name: stop rabbitmq-server
- service: name=rabbitmq-server
- state=stopped
-
-- name: update .erlang.cookie
- template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie
- group=rabbitmq
- owner=rabbitmq
- mode=0400
- when: ERLANG_TOKEN is defined
-
-- name: start and enable rabbitmq-server
- service: name=rabbitmq-server
- state=started
- enabled=yes
-
-- name: generate mq service list
- shell: echo {{ item }} >> /opt/service
- with_items: services_noarch
-
-- name: modify rabbitmq password
- command: rabbitmqctl change_password guest {{ RABBIT_PASS }}
- when: "RABBIT_USER is defined and RABBIT_USER == 'guest'"
- ignore_errors: True
-
-- name: add rabbitmq user
- command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }}
- when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
- ignore_errors: True
-
-- name: set rabbitmq user permission
- command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*"
- when: "RABBIT_USER is defined and RABBIT_USER != 'guest'"
-
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
index fdc18a65..0cae5298 100644
--- a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
+++ b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_cluster.yml
@@ -1,27 +1,13 @@
---
- name: stop rabbitmq app
- command: rabbitmqctl stop_app
- run_once: True
+ shell: rabbitmqctl stop_app; sleep 3
-- name: rabbitmqctl reset
- command: rabbitmqctl reset
- run_once: True
-
-- name: stop rabbitmq
- shell: rabbitmqctl stop
-
-- name: set detach
- shell: rabbitmq-server -detached
-
-- name: join cluster
- command: rabbitmqctl join_cluster rabbit@{{ item }}
- when: item != inventory_hostname and HA_CLUSTER[item] == ''
- with_items:
- groups['controller']
+- name: join cluster
+ shell: rabbitmqctl join_cluster rabbit@{{ haproxy_hosts.keys()[0] }}
+ register: join_result
+ until: join_result|success
+ retries: 20
+ delay: 3
- name: start rabbitmq app
- command: rabbitmqctl start_app
-
-- name: set the HA policy
- rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all"
-
+ shell: rabbitmqctl start_app
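
Note: the rewritten join sequence relies on `until`/`retries` instead of one-shot commands: `rabbitmqctl join_cluster` is retried up to 20 times at 3-second intervals until it exits 0, covering the window where the first node's beam process is still coming up. A hypothetical follow-up assertion that the node really joined:

    - name: verify node joined the cluster (hypothetical check)
      shell: rabbitmqctl cluster_status | grep -q {{ haproxy_hosts.keys()[0] }}
      register: cluster_check
      until: cluster_check|success
      retries: 10
      delay: 3
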
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_config.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_config.yml
new file mode 100644
index 00000000..5eeccd65
--- /dev/null
+++ b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_config.yml
@@ -0,0 +1,15 @@
+---
+- name: remove default guest user
+ rabbitmq_user:
+ user: guest
+ state: absent
+
+- name: add rabbitmq user
+ rabbitmq_user:
+ user='{{ RABBIT_USER }}'
+ password='{{ RABBIT_PASS }}'
+ vhost=/
+ configure_priv=.*
+ write_priv=.*
+ read_priv=.*
+ state=present
diff --git a/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml
new file mode 100755
index 00000000..559fa423
--- /dev/null
+++ b/deploy/adapters/ansible/roles/mq/tasks/rabbitmq_install.yml
@@ -0,0 +1,68 @@
+---
+- name: create rabbitmq directory
+ file: path=/etc/rabbitmq state=directory mode=0755
+
+- name: disable auto start
+ copy:
+ content: "#!/bin/sh\nexit 101"
+ dest: "/usr/sbin/policy-rc.d"
+ mode: 0755
+ when: ansible_os_family == "Debian"
+
+- name: install rabbitmq-server
+ action: "{{ ansible_pkg_mgr }} name=rabbitmq-server state=present"
+ with_items: packages | union(packages_noarch)
+
+- name: enable auto start
+ file:
+ path=/usr/sbin/policy-rc.d
+ state=absent
+ when: ansible_os_family == "Debian"
+
+- name: make sure rabbitmq-server stopped
+ service:
+ name: rabbitmq-server
+ state: stopped
+ enabled: yes
+
+- name: replace cookie
+ copy:
+ content: "{{ ERLANG_TOKEN }}"
+ dest: /var/lib/rabbitmq/.erlang.cookie
+ mode: 0400
+ owner: rabbitmq
+ group: rabbitmq
+
+- name: replace config
+ copy:
+ content: "RABBITMQ_NODE_IP_ADDRESS={{ internal_ip }}"
+ dest: /etc/rabbitmq/rabbitmq-env.conf
+ mode: 0400
+ owner: rabbitmq
+ group: rabbitmq
+
+- name: set open file limit for rabbitmq
+ copy:
+ content: "ulimit -n 65536"
+ dest: /etc/default/rabbitmq-server
+ mode: 0400
+ owner: rabbitmq
+ group: rabbitmq
+
+- name: restart rabbitmq-server
+ service:
+ name: rabbitmq-server
+ state: restarted
+
+- name: enable queue mirroring
+ rabbitmq_policy:
+ name: "ha-all"
+ pattern: '^(?!amq\.).*'
+ tags: "ha-mode=all"
+
+- include: rabbitmq_cluster.yml
+ when: inventory_hostname != haproxy_hosts.keys()[0]
+
+- name: generate mq service list
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
+ with_items: services_noarch
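
Note: two details in this new install file. Writing `exit 101` to /usr/sbin/policy-rc.d is the standard Debian trick to stop apt from auto-starting rabbitmq-server before the Erlang cookie is in place (the file is removed again right after install); and the `ha-all` policy mirrors every non-`amq.` queue across the cluster. A hypothetical post-install assertion for the policy:

    - name: confirm ha-all policy exists (hypothetical check)
      shell: rabbitmqctl list_policies | grep -q ha-all
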
diff --git a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
index 640692ff..0d7bb2d9 100644
--- a/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-compute/tasks/main.yml
@@ -24,7 +24,7 @@
when: ansible_os_family == 'RedHat'
- name: generate neutron compute service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: config ml2 plugin
@@ -46,4 +46,3 @@
- include: ../../neutron-network/tasks/odl.yml
when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml
index 5825c5fe..a0d3b343 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/main.yml
@@ -11,3 +11,5 @@
- config
- neutron_config
- neutron
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
index fce12722..044163e8 100644
--- a/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
+++ b/deploy/adapters/ansible/roles/neutron-controller/tasks/neutron_install.yml
@@ -6,7 +6,7 @@
with_items: packages | union(packages_noarch)
- name: generate neutron control service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: get tenant id to fill neutron.conf
diff --git a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
index f8e9e8c4..a3a62b11 100644
--- a/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/neutron-network/tasks/main.yml
@@ -23,20 +23,12 @@
failed_when: iproute_out.rc == 255
when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
-- name: update epel-release
- shell: yum install -y http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
- ignore_errors: True
-
-- name: update rdo-release-kilo repo
- shell: yum install -y http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
- ignore_errors: True
-
- name: install neutron network related packages
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: packages | union(packages_noarch)
- name: generate neutron network service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: fix openstack neutron plugin config file
@@ -82,7 +74,10 @@
- include: odl.yml
when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
-- name: restart neutron services
- debug: msg="restart neutron services"
- notify:
- - restart neutron network relation service
+- name: restart neutron network relation service
+ service: name={{ item }} state=restarted enabled=yes
+ with_flattened:
+ - services_noarch
+ - services
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
index f4bb373e..6c30f25b 100644
--- a/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-compute/tasks/main.yml
@@ -20,10 +20,10 @@
- restart nova-compute services
- name: generate neutron control service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
-- meta: flush_handlers
-
- name: remove nova sqlite db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/main.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/main.yml
index da820c35..00a25284 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/main.yml
@@ -11,3 +11,5 @@
- config
- nova_config
- nova
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
index 7242fda6..58159589 100644
--- a/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
+++ b/deploy/adapters/ansible/roles/nova-controller/tasks/nova_install.yml
@@ -6,7 +6,7 @@
with_items: packages | union(packages_noarch)
- name: generate nova control service list
- shell: echo {{ item }} >> /opt/service
+ lineinfile: dest=/opt/service create=yes line='{{ item }}'
with_items: services | union(services_noarch)
- name: update nova conf
diff --git a/deploy/adapters/ansible/roles/odl_cluster/files/recover_network.py b/deploy/adapters/ansible/roles/odl_cluster/files/recover_network.py
new file mode 100755
index 00000000..8d48ac1c
--- /dev/null
+++ b/deploy/adapters/ansible/roles/odl_cluster/files/recover_network.py
@@ -0,0 +1,65 @@
+import yaml
+import netaddr
+import os
+import log as logging
+
+LOG = logging.getLogger("net-recover")
+config_path = os.path.join(os.path.dirname(__file__), "network.cfg")
+
+def setup_bondings(bond_mappings):
+ print bond_mappings
+
+def add_ovs_port(ovs_br, ifname, uplink, vlan_id=None):
+ LOG.info("add_ovs_port enter")
+ cmd = "ovs-vsctl --may-exist add-port %s %s" % (ovs_br, ifname)
+ if vlan_id:
+ cmd += " tag=%s" % vlan_id
+ cmd += " -- set Interface %s type=internal;" % ifname
+ cmd += "ip link set dev %s address `ip link show %s |awk '/link\/ether/{print $2}'`;" \
+ % (ifname, uplink)
+ cmd += "ip link set %s up;" % ifname
+ LOG.info("add_ovs_port: cmd=%s" % cmd)
+ os.system(cmd)
+
+def setup_ips(ip_settings, sys_intf_mappings):
+ LOG.info("setup_ips enter")
+ for intf_info in ip_settings.values():
+ network = netaddr.IPNetwork(intf_info["cidr"])
+ if sys_intf_mappings[intf_info["name"]]["type"] == "ovs":
+ intf_name = intf_info["name"]
+ else:
+ intf_name = intf_info["alias"]
+ if "gw" in intf_info:
+ cmd = "ip addr add %s/%s brd %s dev %s;" \
+ % (intf_info["ip"], intf_info["netmask"], str(network.broadcast),intf_name)
+ cmd += "route del default;"
+ cmd += "ip route add default via %s dev %s" % (intf_info["gw"], intf_name)
+ LOG.info("setup_ips: cmd=%s" % cmd)
+ os.system(cmd)
+
+def setup_intfs(sys_intf_mappings, uplink_map):
+ LOG.info("setup_intfs enter")
+ for intf_name, intf_info in sys_intf_mappings.items():
+ if intf_info["type"] == "ovs":
+ add_ovs_port(
+ intf_info["interface"],
+ intf_name,
+ uplink_map[intf_info["interface"]],
+ vlan_id=intf_info.get("vlan_tag"))
+ else:
+ pass
+
+def main(config):
+ uplink_map = {}
+ setup_bondings(config["bond_mappings"])
+ for provider_net in config["provider_net_mappings"]:
+ uplink_map[provider_net['name']] = provider_net['interface']
+
+ setup_intfs(config["sys_intf_mappings"], uplink_map)
+ setup_ips(config["ip_settings"], config["sys_intf_mappings"])
+
+
+if __name__ == "__main__":
+ os.system("service openvswitch-switch status|| service openvswitch-switch start")
+ config = yaml.load(open(config_path))
+ main(config)
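
Note: recover_network.py is Python 2 (`print bond_mappings`) and expects two neighbours beside it at runtime: a `log.py` helper (imported as `log`) and the network.cfg YAML it loads. A deployment sketch matching the copy task used later in openvswitch.yml, with the extra files assumed to live in the same files/ directory:

    - name: ship recovery helper with its dependencies (sketch)
      copy: src={{ item }} dest=/opt/setup_networks/
      with_items:
        - recover_network.py
        - log.py
        - network.cfg
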
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
index f06ce193..1eb517b8 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/main.yml
@@ -9,10 +9,4 @@
include: openvswitch.yml
when: groups['odl']|length !=0 and inventory_hostname not in groups['odl']
-- name: check out new flow table if enable
- shell: ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep CONTROLLER; while [ $? -ne 0 ]; do sleep 10; ovs-ofctl --protocol=OpenFlow13 dump-flows br-prv | grep CONTROLLER; done
- when: groups['odl']|length !=0
-- name: remove controller from br-prv
- shell: ovs-vsctl del-controller br-prv;
- when: groups['odl']|length !=0
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
index 0c13ff21..08881202 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/odl_controller.yml
@@ -57,8 +57,8 @@
- name: extract odl package
# unarchive: src=/opt/{{ odl_pkg_name }} dest={{ odl_home }} group=odl owner=odl mode=0775 copy=no
command: su -s /bin/sh -c "tar xzf /opt/{{ odl_pkg_name }} -C {{ odl_home }} --strip-components 1 --no-overwrite-dir -k --skip-old-files" odl
- notify:
- - restart odl service
+# notify:
+# - restart odl service
- name: opendaylight system file
template:
@@ -66,13 +66,13 @@
dest: "{{ service_file.dst }}"
mode: 0644
-#- name: create karaf config
-# template:
-# src: org.apache.karaf.features.cfg
-# dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
-# owner: odl
-# group: odl
-# mode: 0775
+- name: create karaf config
+ template:
+ src: org.apache.karaf.features.cfg
+ dest: "{{ odl_home }}/etc/org.apache.karaf.features.cfg"
+ owner: odl
+ group: odl
+ mode: 0775
# notify:
# - restart odl service
@@ -80,8 +80,14 @@
template:
src: tomcat-server.xml
dest: "{{ odl_home }}/configuration/tomcat-server.xml"
- notify:
- - restart odl service
+# notify:
+# - restart odl service
+
+
+- name: install odl pip packages
+ pip: name={{ item }} state=present
+ with_items: odl_pip
+
#- name: restart odl service
# service: name=opendaylight state=started pattern="opendaylight"
@@ -97,16 +103,16 @@
template:
src: akka.conf
dest: "{{ odl_home }}/configuration/initial/akka.conf"
- notify:
- - restart odl service
+# notify:
+# - restart odl service
- name: create module-shards config
template:
src: module-shards.conf
dest: "{{ odl_home }}/configuration/initial/module-shards.conf"
- notify:
- - restart odl service
+# notify:
+# - restart odl service
#- name: copy Jolokia-OSGi config
# shell: >
@@ -140,9 +146,8 @@
#- name: copy Jolokia-OSGi jar config
# copy: src=roles/odl_cluster/templates/jolokia-osgi-1.1.5.jar dest="{{ odl_home }}/system/org/jolokia/jolokia-osgi/1.1.5/"
-- name: remove KARAF Data Directory
- shell: >
- rm -rf {{ odl_home }}/data/*;
+- name: remove karaf data directory
+ shell: rm -rf {{ odl_home }}/data/*;
#- name: chown OpenDaylight Directory and Files
# shell: >
@@ -153,23 +158,39 @@
##########################################################################################################
################################ OpenDayLight connect with OpenStack ################################
##########################################################################################################
-- name: Turn off neutron-server on control node
+- name: remove neutron-server, neutron-plugin-openvswitch-agent and keepalived from service list on control node
+ shell: >
+ sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
+ sed -i '/neutron-server/d' /opt/service;
+ sed -i '/keepalived/d' /opt/service;
+
+- name: turn off neutron-server on control node
service: name=neutron-server state=stopped
+- name: turn off keepalived on control node
+ service: name=keepalived state=stopped
+
#- name: Install Crudini
# apt: name={{ item }} state=present
# with_items:
# - crudini
-- name: Run OpenVSwitch Script
- include: openvswitch.yml
-
-- name: chown OpenDaylight Directory and Files
+- name: chown opendaylight directory and files
shell: >
chown -R odl:odl "{{ odl_home }}";
chown odl:odl "{{ service_file.dst }}";
+- name: start opendaylight
+ service: name=opendaylight state=started
+
+- name: check if opendaylight is running
+ shell: netstat -lpen --tcp | grep java | grep 6653; while [ $? -ne 0 ]; do sleep 10; netstat -lpen --tcp | grep java | grep 6653; done
+
+- name: run openvswitch script
+ include: openvswitch.yml
+
+
#- name: Configure Neutron1
# shell: >
# crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
@@ -184,12 +205,26 @@
#- name: Execute ML2 Configuration File
# command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
-- name: Configure Neutron2
- shell: >
- mysql -e "drop database if exists neutron_ml2;";
- mysql -e "create database neutron_ml2 character set utf8;";
- mysql -e "grant all on neutron_ml2.* to 'neutron'@'%';";
- su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+- name: drop and recreate neutron database
+ shell: mysql -e "drop database if exists neutron;";
+ mysql -e "create database neutron character set utf8;";
+ mysql -e "grant all on neutron.* to 'neutron'@'%' identified by 'console';";
+ su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron;
+ when: inventory_hostname == haproxy_hosts.keys()[0]
-- name: Restart neutron-server
+- name: restart neutron-server
service: name=neutron-server state=started
+
+- name: add service daemon
+ shell: >
+ echo opendaylight >> /opt/service ;
+ echo neutron-server >> /opt/service ;
+
+- name: restart neutron-l3-agent server
+ service: name=neutron-l3-agent state=restarted
+
+- name: restart neutron-dhcp-agent server
+ service: name=neutron-dhcp-agent state=restarted
+
+- name: restart neutron-metadata-agent server
+ service: name=neutron-metadata-agent state=restarted
diff --git a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
index 3bef2af3..ae9027c9 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/tasks/openvswitch.yml
@@ -9,52 +9,100 @@
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
with_items: compute_packages | union(compute_packages_noarch)
-- name: Adjust Service Daemon
- shell: >
- sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
- echo opendaylight >> /opt/service ;
+- name: remove neutron-openvswitch-agent service daemon
+ shell: sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
- name: shut down and disable Neutron's openvswitch agent services
service: name=neutron-plugin-openvswitch-agent state=stopped
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- ovs-ofctl del-flows br-int ;
- ovs-vsctl del-br br-tun ;
- ovs-vsctl del-manager ;
+#- name: Stop the Open vSwitch service and clear existing OVSDB
+# shell: >
+# ovs-ofctl del-flows br-int ;
+# ovs-vsctl del-br br-tun ;
+# ovs-vsctl del-port br-int patch-tun;
+# ovs-vsctl del-manager ;
+
+#- name: Restart OpenVSwitch
+# shell: service openvswitch-switch restart;
#- name: remove Neutron's openvswitch agent services
# shell: >
# update-rc.d neutron-plugin-openvswitch-agent remove
-#- name: Stop the Open vSwitch service and clear existing OVSDB
-# shell: >
-# service openvswitch-switch stop ;
-# rm -rf /var/log/openvswitch/* ;
-# rm -rf /etc/openvswitch/conf.db ;
-# service openvswitch-switch start ;
+- name: Check External network
+ shell: ovs-vsctl list-br | grep br-prv
+ register: extbr
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+ shell: >
+ service openvswitch-switch stop ;
+ rm -rf /var/log/openvswitch/* ;
+ rm -rf /etc/openvswitch/conf.db ;
+ service openvswitch-switch start ;
+
+#- name: Set OpenDaylight as the manager
+# command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
+
+
+- name: set local ip in openvswitch
+ shell: ovs-vsctl set Open_vSwitch $(ovs-vsctl show | head -n 1) other_config={'local_ip'=' {{ internal_ip }} '};
+
+#'
+##################################################################
+################ Recover External network #######################
+##################################################################
+
+- name: add ovs bridge
+ openvswitch_bridge: bridge={{ item["name"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and extbr.rc == 0
+
+- name: add ovs uplink
+ openvswitch_port: bridge={{ item["name"] }} port={{ item["interface"] }} state=present
+ with_items: "{{ network_cfg['provider_net_mappings'] }}"
+ when: item["type"] == "ovs" and extbr.rc == 0
+
+- name: copy recovery script
+ copy: src={{ item }} dest=/opt/setup_networks
+ with_items:
+ - recover_network.py
+ when: extbr.rc == 0
+
+- name: recover external script
+ shell: python /opt/setup_networks/recover_network.py
+ when: extbr.rc == 0
+
+- name: restart keepalived
+ shell: service keepalived restart
+ when: inventory_hostname in groups['odl'] and extbr.rc == 0
+ ignore_errors: True
+
+##################################################################
+##################################################################
+##################################################################
-- name: Set OpenDaylight as the manager
+- name: set opendaylight as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ internal_vip.ip }}:6640;"
-#- name: start and disable Neutron's agent services
-# service: name=neutron-plugin-openvswitch-agent state=started
+- name: check br-int
+ shell: ovs-vsctl list-br | grep br-int; while [ $? -ne 0 ]; do sleep 10; ovs-vsctl list-br | grep br-int; done
-- name: Configure Neutron1
+- name: configure opendaylight -> ml2
shell: >
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers opendaylight;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ovs enable_tunneling True;
#- name: Adjust Service Daemon
# shell: >
# sed -i '/neutron-plugin-openvswitch-agent/d' /opt/service ;
# echo opendaylight >> /opt/service ;
-- name: Create ML2 Configuration File
+- name: copy ml2 configuration script
template:
src: ml2_conf.sh
dest: "/opt/ml2_conf.sh"
mode: 0777
-- name: Execute ML2 Configuration File
+- name: execute ml2 configuration script
command: su -s /bin/sh -c "/opt/ml2_conf.sh;"
diff --git a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
index 4f439c25..e9953e7f 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
+++ b/deploy/adapters/ansible/roles/odl_cluster/templates/org.apache.karaf.features.cfg
@@ -36,19 +36,19 @@
#
# Comma separated list of features repositories to register by default
#
-#featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.1/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.1/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.0/xml/features,mvn:org.apache.karaf.features/spring/3.0.1/xml/features,mvn:org.opendaylight.integration/features-integration/0.2.2-Helium-SR2/xml/features,mvn:org.jolokia/jolokia-osgi/1.1.4/xml/features
-
-featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.1/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.1/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.0/xml/features,mvn:org.apache.karaf.features/spring/3.0.1/xml/features,mvn:org.opendaylight.integration/features-integration/0.2.2-Helium-SR2/xml/features,mvn:org.jolokia/jolokia-osgi/1.1.5/xml/features
+featuresRepositories = mvn:org.apache.karaf.features/standard/3.0.3/xml/features,mvn:org.apache.karaf.features/enterprise/3.0.3/xml/features,mvn:org.ops4j.pax.web/pax-web-features/3.1.4/xml/features,mvn:org.apache.karaf.features/spring/3.0.3/xml/features,mvn:org.opendaylight.integration/features-integration-index/0.3.2-Lithium-SR2/xml/features
#
# Comma separated list of features to install at startup
-# Default features:
-# config,standard,region,package,kar,ssh,management
#
-featuresBoot= {{ odl_features | join(",") }}
+featuresBoot=config,standard,region,package,kar,ssh,management,odl-ovsdb-openstack
+
+#,odl-restconf-all,odl-aaa-authn,odl-dlux-all
+
+# odl-base-all,odl-restconf,odl-ovsdb-openstack,odl-dlux-all,odl-mdsal-apidocs
+#,odl-mdsal-clustering,odl-openflowplugin-flow-services
#
# Defines if the boot features are started in asynchronous mode (in a dedicated thread)
#
featuresBootAsynchronous=false
-
diff --git a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
index 450ee0d0..0355c0ab 100755
--- a/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/odl_cluster/vars/main.yml
@@ -16,3 +16,7 @@ jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
controller_packages_noarch: []
compute_packages_noarch: []
+
+odl_pip:
+ - networking_odl
+
diff --git a/deploy/adapters/ansible/roles/onos_cluster/files/networking-onos.tar b/deploy/adapters/ansible/roles/onos_cluster/files/networking-onos.tar
index 765afa02..9358199c 100755..100644
--- a/deploy/adapters/ansible/roles/onos_cluster/files/networking-onos.tar
+++ b/deploy/adapters/ansible/roles/onos_cluster/files/networking-onos.tar
Binary files differ
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
index c3e7c7b7..3cd4421b 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/main.yml
@@ -1,11 +1,44 @@
---
- include_vars: "{{ ansible_os_family }}.yml"
+- name: remove neutron-plugin-openvswitch-agent auto start
+ shell: >
+ update-rc.d neutron-plugin-openvswitch-agent remove;
+ sed -i /neutron-plugin-openvswitch-agent/d /opt/service
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
+- name: shut down and disable Neutron's agent services
+ service: name=neutron-plugin-openvswitch-agent state=stopped
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
+- name: remove neutron-l3-agent auto start
+ shell: >
+ update-rc.d neutron-l3-agent remove;
+ sed -i /neutron-l3-agent/d /opt/service
+ when: inventory_hostname in groups['onos']
+ ignore_errors: True
+
+- name: shut down and disable Neutron's l3 agent services
+ service: name=neutron-l3-agent state=stopped
+ when: inventory_hostname in groups['onos']
+ ignore_errors: True
+
+- name: Stop the Open vSwitch service and clear existing OVSDB
+ shell: >
+ ovs-vsctl del-br br-int ;
+ ovs-vsctl del-br br-tun ;
+ ovs-vsctl del-manager ;
+ when: groups['onos']|length !=0
+ ignore_errors: True
+
- name: Install ONOS Cluster on Controller
include: onos_controller.yml
when: inventory_hostname in groups['onos']
- name: Install ONOS Cluster on Compute
include: openvswitch.yml
- when: groups['onos']|length !=0 and inventory_hostname not in groups['onos']
+ when: groups['onos']|length !=0
+# when: groups['onos']|length !=0 and inventory_hostname not in groups['onos']
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
index 20691cc9..0606fad9 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/onos_controller.yml
@@ -63,9 +63,9 @@
echo 'export ONOS_USER=root' >> {{ onos_home }}/options;
mkdir {{ onos_home }}/var;
mkdir {{ onos_home }}/config;
- sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/debian/onos.conf;
- cp -rf {{ onos_home }}/debian/onos.conf /etc/init/;
- cp -rf {{ onos_home }}/debian/onos.conf /etc/init.d/;
+ sed -i '/pre-stop/i\env JAVA_HOME=/usr/lib/jvm/java-8-oracle' {{ onos_home }}/init/onos.conf;
+ cp -rf {{ onos_home }}/init/onos.conf /etc/init/;
+ cp -rf {{ onos_home }}/init/onos.conf /etc/init.d/;
# notify:
# - restart onos service
@@ -73,17 +73,17 @@
shell: >
sed -i '/^featuresBoot=/c\featuresBoot={{ onos_boot_features }}' {{ onos_home }}/{{ karaf_dist }}/etc/org.apache.karaf.features.cfg;
-- name: create cluster json
- template:
- src: cluster.json
- dest: "{{ onos_home }}/config/cluster.json"
+#- name: create cluster json
+# template:
+# src: cluster.json
+# dest: "{{ onos_home }}/config/cluster.json"
# notify:
# - restart onos service
-- name: create tablets json
- template:
- src: tablets.json
- dest: "{{ onos_home }}/config/tablets.json"
+#- name: create tablets json
+# template:
+# src: tablets.json
+# dest: "{{ onos_home }}/config/tablets.json"
# notify:
# - restart onos service
@@ -115,12 +115,13 @@
##########################################################################################################
################################ ONOS connect with OpenStack ################################
##########################################################################################################
-- name: Run OpenVSwitch Script
- include: openvswitch.yml
+#- name: Run OpenVSwitch Script
+# include: openvswitch.yml
- name: Configure Neutron1
shell: >
- crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos;
+ crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins onos_router;
+ crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers onos_ml2;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan;
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers vxlan
diff --git a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
index 5bea0ae2..77161a63 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/tasks/openvswitch.yml
@@ -1,17 +1,17 @@
---
-- name: remove neutron-plugin-openvswitch-agent auto start
- shell: >
- update-rc.d neutron-plugin-openvswitch-agent remove;
- sed -i /neutron-plugin-openvswitch-agent/d /opt/service
+#- name: remove neutron-plugin-openvswitch-agent auto start
+# shell: >
+# update-rc.d neutron-plugin-openvswitch-agent remove;
+# sed -i /neutron-plugin-openvswitch-agent/d /opt/service
-- name: shut down and disable Neutron's agent services
- service: name=neutron-plugin-openvswitch-agent state=stopped
+#- name: shut down and disable Neutron's agent services
+# service: name=neutron-plugin-openvswitch-agent state=stopped
-- name: Stop the Open vSwitch service and clear existing OVSDB
- shell: >
- ovs-vsctl del-br br-int ;
- ovs-vsctl del-br br-tun ;
- ovs-vsctl del-manager ;
+#- name: Stop the Open vSwitch service and clear existing OVSDB
+# shell: >
+# ovs-vsctl del-br br-int ;
+# ovs-vsctl del-br br-tun ;
+# ovs-vsctl del-manager ;
#- name: get image http server
# shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
@@ -28,6 +28,54 @@
# cd /opt/openvswitch;
# dpkg -i openvswitch-common_2.3.0-1_amd64.deb;
# dpkg -i openvswitch-switch_2.3.0-1_amd64.deb;
+
+- name: start up onos-external nic
+ command: su -s /bin/sh -c "ifconfig eth2 0 up"
+
+#- name: wait for onos start time
+# shell: "sleep 200"
+
+- name: add ovsdatabase feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-ovsdatabase'";
+ when: inventory_hostname == groups['onos'][0]
+
+- name: add openflow-base feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow-base'";
+ when: inventory_hostname == groups['onos'][0]
+
+- name: add openflow feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-openflow'";
+ when: inventory_hostname == groups['onos'][0]
+
+- name: add vtn feature
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'feature:install onos-app-vtn-onosfw'";
+ when: inventory_hostname == groups['onos'][0]
+
+
+- name: set public eth card start
+ command: su -s /bin/sh -c "/opt/onos/bin/onos 'externalportname-set -n eth2'"
+ when: inventory_hostname == groups['onos'][0]
+
- name: Set ONOS as the manager
command: su -s /bin/sh -c "ovs-vsctl set-manager tcp:{{ ip_settings[groups['onos'][0]]['mgmt']['ip'] }}:6640;"
+- name: create public network
+ shell: >
+ export OS_PASSWORD=console;
+ export OS_TENANT_NAME=admin;
+ export OS_AUTH_URL=http://{{ internal_vip.ip }}:35357/v2.0;
+ export OS_USERNAME=admin;
+ neutron net-create ext-net --shared --router:external=True;
+ neutron subnet-create ext-net --name ext-subnet {{ ip_settings[haproxy_hosts.keys()[0]]['external']['cidr'] }};
+ when: inventory_hostname == groups['controller'][0]
+
+- name: set gateway mac address
+ shell: >
+ ping -c 1 {{ ansible_default_ipv4.gateway }};
+ gatewayMac=`arp -a {{ ansible_default_ipv4.gateway }} | awk '{print $4}'`;
+ /opt/onos/bin/onos "externalgateway-update -m $gatewayMac";
+ when: inventory_hostname == groups['onos'][0]
+
+- name: delete default gateway
+ command: su -s /bin/sh -c "route delete dufault";
+ ignore_errors: True
diff --git a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
index 66d81353..f083a89c 100755
--- a/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
+++ b/deploy/adapters/ansible/roles/onos_cluster/vars/main.yml
@@ -3,4 +3,4 @@ onos_pkg_name: onos-1.3.0.tar.gz
onos_home: /opt/onos/
karaf_dist: apache-karaf-3.0.3
jdk8_pkg_name: jdk-8u51-linux-x64.tar.gz
-onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow,onos-ovsdatabase,onos-app-vtnrsc,onos-app-vtn,onos-app-vtnweb,onos-app-proxyarp
+onos_boot_features: config,standard,region,package,kar,ssh,management,webconsole,onos-api,onos-core,onos-incubator,onos-cli,onos-rest,onos-gui,onos-openflow-base,onos-openflow,onos-ovsdatabase,onos-app-vtn-onosfw
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml
index 02bc7f81..ba6ccbc8 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-collector.yml
@@ -6,11 +6,11 @@
- name: "temporary disable supervisor analytics"
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-analytics.override"
- name: "install contrail openstack analytics package"
# apt:
# name: "contrail-openstack-analytics"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: collector_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: collector_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml
index ea08e49b..e963e160 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-common.yml
@@ -2,16 +2,25 @@
#- hosts: all
# sudo: yes
# tasks:
-- name: "copy contrail install package temporary"
+#- name: "copy contrail install package temporary"
# sudo: True
- copy:
- src: "files/{{ package }}"
- dest: "/tmp/{{ package }}"
+# copy:
+# src: "{{ package }}"
+# dest: "/tmp/{{ package }}"
+
+- name: get image http server
+ shell: awk -F'=' '/compass_server/ {print $2}' /etc/compass.conf
+ register: http_server
+
+- name: download OpenContrail package file
+ get_url: url="http://{{ http_server.stdout_lines[0] }}/packages/open-contrail/{{ package }}" dest=/tmp/{{ package }}
+
- name: "install contrail install package"
# sudo: True
apt:
deb: "/tmp/{{ package }}"
+ force: yes
- name: "delete temporary contrail install package"
# sudo: True
@@ -48,7 +57,9 @@
# sudo: True
apt:
deb: "{{ item }}"
+ force: yes
with_items: required_packages.stdout_lines
+ ignore_errors: True
- name: modify source list
# sudo: True
@@ -82,3 +93,4 @@
apt:
name: "contrail-setup"
update_cache: yes
+ force: yes
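
Note: `force: yes` on the apt module (mirrored as `force=yes` on the generic package action in the other install files) corresponds to apt-get --force-yes: it lets unauthenticated or downgraded Contrail packages install without prompting, which is why it pairs with `ignore_errors` on the dependency loop. The recurring pattern:

    - name: install contrail package allowing unauthenticated debs
      action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
      with_items: config_package
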
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml
index 12b6ad28..d7f5c38b 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-compute.yml
@@ -5,7 +5,7 @@
- name: "temporary disable supervisor vrouter"
# sudo: True
template:
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-vrouter.override"
# - name: "install nova-compute for contrail package"
@@ -18,8 +18,8 @@
# name: "contrail-vrouter-3.13.0-40-generic"
# when: ansible_kernel == "3.13.0-40-generic"
# sudo: True
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: vrouter_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: vrouter_package
when: ansible_kernel == kernel_required
- name: "install contrail vrouter dkms package"
@@ -27,8 +27,8 @@
# name: "contrail-vrouter-dkms"
# when: ansible_kernel != "3.13.0-40-generic"
# sudo: True
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: dkms_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: dkms_package
when: ansible_kernel != kernel_required
# - name: "install contrail vrouter common package"
@@ -42,6 +42,6 @@
- name: "install contrail vrouter common & nova vif package"
# sudo: True
action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: compute_packages | union(compute_packages_noarch)
+ with_items: compute_package | union(compute_package_noarch)
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml
index 52459eb8..dbd542be 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-config.yml
@@ -6,19 +6,19 @@
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-config.override"
- name: "temporary disable neutron server"
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/neutron-server.override"
- name: "install contrail openstack config package"
# sudo: True
# apt:
# name: "contrail-openstack-config"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: config_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: config_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml
index 6bb7fb25..51a713ec 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-control.yml
@@ -6,19 +6,19 @@
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-control.override"
- name: "temporary disable supervisor dns"
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-dns.override"
- name: "install contrail openstack control package"
# sudo: True
# apt:
# name: "contrail-openstack-control"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: control_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: control_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml
index fea4cef7..5b4875a7 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-database.yml
@@ -6,12 +6,12 @@
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-database.override"
- name: "install contrail openstack database package"
# sudo: True
# apt:
# name: "contrail-openstack-database"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: database_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: database_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml
index 6d4ca035..ed638b6a 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-kernel.yml
@@ -2,6 +2,7 @@
#- hosts: all
# sudo: yes
# tasks:
+
- name: "install Ubuntu kernel"
# sudo: True
# apt:
@@ -10,8 +11,8 @@
# name: "linux-image-3.13.0-40-generic"
# name: "linux-image-extra-3.13.0-40-generic"
# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: kernel_packages | union(kernel_packages_noarch)
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: kernel_package | union(kernel_package_noarch)
when: (kernel_install) and (ansible_kernel != kernel_required)
- name: "setup grub"
@@ -39,7 +40,7 @@
# when: (kernel_install) and (ansible_kernel != "3.13.0-40-generic")
when: (kernel_install) and (ansible_kernel != kernel_required)
-handlers:
+# handlers:
- name: "Wait for server to come back"
local_action:
module: wait_for
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml
index d66af675..665f2be2 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/install/install-webui.yml
@@ -7,12 +7,12 @@
# sudo: True
template:
# src: "templates/override.j2"
- src: "install/override.j2"
+ src: "../../templates/install/override.j2"
dest: "/etc/init/supervisor-webui.override"
- name: "install contrail openstack webui package"
# sudo: True
# apt:
# name: "contrail-openstack-webui"
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items: webui_packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
+ with_items: webui_package
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
index 2a0e2709..93f22117 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/main.yml
@@ -33,7 +33,8 @@
- name: Install compute for Open Contrail
include: install/install-compute.yml
- when: inventory_hostname in groups['opencontrail_compute'] or inventory_hostname in groups['opencontrail_tsn']
+ when: inventory_hostname in groups['opencontrail_compute']
+# or inventory_hostname in groups['opencontrail_tsn']
# Compass adapter: use OpenStack management network "mgmt"
@@ -52,9 +53,9 @@
#- include: install/install-interface.yml
-- name: Provision route on all hosts for Open Contrail
- include: provision/provision-route.yml
- when: groups['opencontrail_control']|length !=0
+#- name: Provision route on all hosts for Open Contrail
+# include: provision/provision-route.yml
+# when: groups['opencontrail_control']|length !=0
#- name: Provision rabbitmq on config for Open Contrail
@@ -64,7 +65,8 @@
- name: Provision increase limits for Open Contrail
include: provision/provision-increase-limits.yml
- when: inventory_hostname in groups['opencontrail_control'] or inventory_hostname in groups['opencontrail_config'] or inventory_hostname in groups['opencontrail_collector'] or inventory_hostname in groups['opencontrail_database']
+ when: inventory_hostname in groups['opencontrail_control']
+#or inventory_hostname in groups['opencontrail_config'] or inventory_hostname in groups['opencontrail_collector'] or inventory_hostname in groups['opencontrail_database']
- name: Provision database for Open Contrail
@@ -76,7 +78,6 @@
include: provision/provision-config.yml
when: inventory_hostname in groups['opencontrail_config']
-
- name: Provision control for Open Contrail
include: provision/provision-control.yml
when: inventory_hostname in groups['opencontrail_control']
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml
index 77ee20e3..f53e38ec 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/-vrouter-compute-setup.yml
@@ -31,21 +31,21 @@
- { section: "DEFAULT", option: "auth_strategy", value: "keystone" }
- { section: "DEFAULT", option: "libvirt_nonblocking", value: "True" }
- { section: "DEFAULT", option: "libvirt_inject_partition", value: "-1" }
- - { section: "DEFAULT", option: "rabbit_host", value: "{{ hostvars[groups['config'][0]]['contrail_address'] }}" }
+ - { section: "DEFAULT", option: "rabbit_host", value: "{{ contrail_haproxy_address }}" }
- { section: "DEFAULT", option: "rabbit_port", value: "5672" }
- - { section: "DEFAULT", option: "glance_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
+ - { section: "DEFAULT", option: "glance_host", value: "{{ contrail_haproxy_address }}" }
- { section: "DEFAULT", option: "glance_port", value: "9292" }
- { section: "DEFAULT", option: "neutron_admin_tenant_name", value: "service" }
- { section: "DEFAULT", option: "neutron_admin_username", value: "neutron" }
- { section: "DEFAULT", option: "neutron_admin_password", value: "{{ contrail_admin_password }}" }
- - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_address'] }}:35357/v2.0/" }
- - { section: "DEFAULT", option: "neutron_url", value: "http://{{ hostvars[groups['config'][0]]['contrail_address'] }}:9696/" }
+ - { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ contrail_haproxy_address }}:35357/v2.0/" }
+ - { section: "DEFAULT", option: "neutron_url", value: "http://{{ contrail_haproxy_address }}:9696/" }
- { section: "DEFAULT", option: "neutron_url_timeout", value: "300" }
- { section: "DEFAULT", option: "network_api_class", value: "nova.network.neutronv2.api.API" }
- { section: "DEFAULT", option: "compute_driver", value: "libvirt.LibvirtDriver" }
- { section: "DEFAULT", option: "network_api_class", value: " nova_contrail_vif.contrailvif.ContrailNetworkAPI" }
- { section: "DEFAULT", option: "ec2_private_dns_show_ip", value: "False" }
- - { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_mgmt_address'] }}:5999/vnc_auto.html" }
+ - { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ contrail_haproxy_address }}:5999/vnc_auto.html" }
- { section: "DEFAULT", option: "vncserver_enabled", value: "True" }
- { section: "DEFAULT", option: "vncserver_listen", value: "{{ contrail_address }}" }
- { section: "DEFAULT", option: "vncserver_proxyclient_address", value: "{{ contrail_address }}" }
@@ -54,16 +54,28 @@
- { section: "DEFAULT", option: "image_cache_manager_interval", value: "0" }
- { section: "DEFAULT", option: "libvirt_cpu_mode", value: "none" }
- { section: "DEFAULT", option: "libvirt_vif_driver", value: "nova_contrail_vif.contrailvif.VRouterVIFDriver" }
- - { section: "database", option: "connection", value: "mysql://nova:nova@{{ hostvars[groups['openstack'][0]]['contrail_address'] }}/nova?charset=utf8" }
+ - { section: "database", option: "connection", value: "mysql://nova:nova@{{ contrail_haproxy_address }}/nova?charset=utf8" }
- { section: "database", option: "idle_timeout", value: "180" }
- { section: "database", option: "max_retries", value: "-1" }
- { section: "keystone_authtoken", option: "admin_tenant_name", value: "service" }
- { section: "keystone_authtoken", option: "admin_user", value: "nova" }
- { section: "keystone_authtoken", option: "admin_password", value: "{{ contrail_admin_password }}" }
- { section: "keystone_authtoken", option: "auth_protocol", value: "http" }
- - { section: "keystone_authtoken", option: "auth_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
+ - { section: "keystone_authtoken", option: "auth_host", value: "{{ contrail_haproxy_address }}" }
- { section: "keystone_authtoken", option: "signing_dir", value: "/tmp/keystone-signing-nova" }
+
+
+#- { section: "DEFAULT", option: "rabbit_host", value: "{{ hostvars[groups['config'][0]]['contrail_address'] }}" }
+#- { section: "DEFAULT", option: "glance_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
+#- { section: "DEFAULT", option: "neutron_admin_auth_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_address'] }}:35357/v2.0/" }
+#- { section: "DEFAULT", option: "neutron_url", value: "http://{{ hostvars[groups['config'][0]]['contrail_address'] }}:9696/" }
+#- { section: "DEFAULT", option: "novncproxy_base_url", value: "http://{{ hostvars[groups['openstack'][0]]['contrail_mgmt_address'] }}:5999/vnc_auto.html" }
+#- { section: "database", option: "connection", value: "mysql://nova:nova@{{ hostvars[groups['openstack'][0]]['contrail_address'] }}/nova?charset=utf8" }
+#- { section: "keystone_authtoken", option: "auth_host", value: "{{ hostvars[groups['openstack'][0]]['contrail_address'] }}" }
+
- name: "change database address if same node as first openstack node"
ini_file:
dest: "/etc/nova/nova.conf"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
index f4ad05cb..b8c07ddd 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-add-nodes.yml
@@ -44,4 +44,4 @@
shell: "python /opt/contrail/utils/provision_encap.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --oper add --encap_priority MPLSoUDP,MPLSoGRE,VXLAN"
run_once: yes
when: inventory_hostname in groups['opencontrail_config']
- \ No newline at end of file
+
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml
index 5dd72d77..4077ba64 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-collector.yml
@@ -19,19 +19,19 @@
- name: "fix up contrail collector config"
template:
- src: "provision/contrail-collector-conf.j2"
+ src: "../../templates/provision/contrail-collector-conf.j2"
dest: "/etc/contrail/contrail-collector.conf"
- name: "fix up contrail query engine config"
template:
- src: "provision/contrail-query-engine-conf.j2"
+ src: "../../templates/provision/contrail-query-engine-conf.j2"
dest: "/etc/contrail/contrail-query-engine.conf"
- name: "fix up contrail analytics api config"
template:
- src: "provision/contrail-analytics-api-conf.j2"
+ src: "../../templates/provision/contrail-analytics-api-conf.j2"
dest: "/etc/contrail/contrail-analytics-api.conf"
@@ -45,7 +45,7 @@
- name: "fix up contrail keystone auth config"
template:
- src: "provision/contrail-keystone-auth-conf.j2"
+ src: "../../templates/provision/contrail-keystone-auth-conf.j2"
dest: "/etc/contrail/contrail-keystone-auth.conf"
force: no
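All of these src changes walk two directories up, presumably because the task files under tasks/provision/ are pulled in via bare include: statements and the role-relative lookup of "provision/*.j2" did not resolve in this setup. The layout the new paths assume:

    roles/open-contrail/
        tasks/provision/provision-collector.yml        # the including task file
        templates/provision/contrail-collector-conf.j2 # reached via ../../templates/provision/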
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml
index 41ea5c25..3e3d6622 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-compute.yml
@@ -17,7 +17,7 @@
- name: "create cgroup device acl for qemu conf"
template:
- src: "provision/qemu-device-acl-conf.j2"
+ src: "../../templates/provision/qemu-device-acl-conf.j2"
dest: "/tmp/qemu-device-acl.conf"
when: deviceacl | failed
@@ -33,7 +33,7 @@
- name: "fix up vrouter nodemgr param"
template:
- src: "provision/vrouter-nodemgr-param.j2"
+ src: "../../templates/provision/vrouter-nodemgr-param.j2"
dest: "/etc/contrail/vrouter_nodemgr_param"
- name: "set contrail device name for ansible"
@@ -42,7 +42,7 @@
- name: "fix up default pmac"
template:
- src: "provision/default-pmac.j2"
+ src: "../../templates/provision/default-pmac.j2"
dest: "/etc/contrail/default_pmac"
- name: "copy agent param config from template"
@@ -56,7 +56,7 @@
- name: "fix up contrail vrouter agent config"
template:
- src: "provision/contrail-vrouter-agent-conf.j2"
+ src: "../../templates/provision/contrail-vrouter-agent-conf.j2"
dest: "/etc/contrail/contrail-vrouter-agent.conf"
- name: "delete lines for contrail interface"
@@ -77,22 +77,22 @@
- "sed -i -n -e '/auto .*/,$p' /tmp/contrail-interfaces-bottom"
- "cat /tmp/contrail-interfaces-top /tmp/contrail-interfaces-bottom > /etc/network/interfaces"
-- name: "configure interface"
- lineinfile:
- dest: "/etc/network/interfaces"
- line: "{{ item }}"
- state: "present"
- with_items:
- - "auto {{ contrail_device }}"
- - "iface {{ contrail_device }} inet manual"
- - "\tpre-up ifconfig {{ contrail_device }} up"
- - "\tpost-down ifconfig {{ contrail_device }} down"
- - "auto vhost0"
- - "iface vhost0 inet static"
- - "\tpre-up /opt/contrail/bin/if-vhost0"
- - "\tnetwork_name application"
- - "\taddress {{ contrail_address }}"
- - "\tnetmask {{ contrail_netmask }}"
+#- name: "configure interface"
+# lineinfile:
+# dest: "/etc/network/interfaces"
+# line: "{{ item }}"
+# state: "present"
+# with_items:
+# - "auto {{ contrail_device }}"
+# - "iface {{ contrail_device }} inet manual"
+# - "\tpre-up ifconfig {{ contrail_device }} up"
+# - "\tpost-down ifconfig {{ contrail_device }} down"
+# - "auto vhost0"
+# - "iface vhost0 inet static"
+# - "\tpre-up /opt/contrail/bin/if-vhost0"
+# - "\tnetwork_name application"
+# - "\taddress {{ contrail_address }}"
+# - "\tnetmask {{ contrail_netmask }}"
- name: "delete temporary files"
file:
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml
index 8aa8f43b..cef5bbff 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-config.yml
@@ -35,7 +35,8 @@
- name: "create haproxy configuration for contrail"
template:
- src: "provision/haproxy-contrail-cfg.j2"
+# src: "provision/haproxy-contrail-cfg.j2"
+ src: "../../templates/provision/haproxy-contrail-cfg.j2"
dest: "/tmp/haproxy-contrail.cfg"
- name: "combination of the haproxy configuration"
@@ -54,7 +55,7 @@
# Compass is using this
#- name: "create keepalived configuration"
# template:
-# src: "provision/keepalived-conf.j2"
+# src: "../../templates/provision/keepalived-conf.j2"
# dest: "/etc/keepalived/keepalived.conf"
# with_indexed_items: groups['opencontrail_config']
# when: contrail_keepalived and item.1 == inventory_hostname
@@ -70,37 +71,37 @@
- name: "fix up contrail keystone auth config"
template:
- src: "provision/contrail-keystone-auth-conf.j2"
+ src: "../../templates/provision/contrail-keystone-auth-conf.j2"
dest: "/etc/contrail/contrail-keystone-auth.conf"
- name: "fix up ifmap server log4j properties"
template:
- src: "provision/ifmap-log4j-properties.j2"
+ src: "../../templates/provision/ifmap-log4j-properties.j2"
dest: "/etc/ifmap-server/log4j.properties"
- name: "fix up ifmap server authorization properties"
template:
- src: "provision/ifmap-authorization-properties.j2"
+ src: "../../templates/provision/ifmap-authorization-properties.j2"
dest: "/etc/ifmap-server/authorization.properties"
- name: "fix up ifmap server basicauthusers properties"
template:
- src: "provision/ifmap-basicauthusers-properties.j2"
+ src: "../../templates/provision/ifmap-basicauthusers-properties.j2"
dest: "/etc/ifmap-server/basicauthusers.properties"
- name: "fix up ifmap server publisher properties"
template:
- src: "provision/ifmap-publisher-properties.j2"
+ src: "../../templates/provision/ifmap-publisher-properties.j2"
dest: "/etc/ifmap-server/publisher.properties"
- name: "fix up contrail api config"
template:
- src: "provision/contrail-api-conf.j2"
+ src: "../../templates/provision/contrail-api-conf.j2"
dest: "/etc/contrail/contrail-api.conf"
- name: "fix up contrail api supervisord config"
template:
- src: "provision/contrail-api-supervisord-conf.j2"
+ src: "../../templates/provision/contrail-api-supervisord-conf.j2"
dest: "/etc/contrail/supervisord_config_files/contrail-api.ini"
- name: "modify contrail api init script"
@@ -111,27 +112,27 @@
- name: "fix up contrail schema config"
template:
- src: "provision/contrail-schema-conf.j2"
+ src: "../../templates/provision/contrail-schema-conf.j2"
dest: "/etc/contrail/contrail-schema.conf"
- name: "fix up contrail device manager config"
template:
- src: "provision/contrail-device-manager-conf.j2"
+ src: "../../templates/provision/contrail-device-manager-conf.j2"
dest: "/etc/contrail/contrail-device-manager.conf"
- name: "fix up contrail svc monitor config"
template:
- src: "provision/contrail-svc-monitor-conf.j2"
+ src: "../../templates/provision/contrail-svc-monitor-conf.j2"
dest: "/etc/contrail/contrail-svc-monitor.conf"
- name: "fix up contrail discovery supervisord config"
template:
- src: "provision/contrail-discovery-supervisord-conf.j2"
+ src: "../../templates/provision/contrail-discovery-supervisord-conf.j2"
dest: "/etc/contrail/supervisord_config_files/contrail-discovery.ini"
- name: "fix up contrail discovery config"
template:
- src: "provision/contrail-discovery-conf.j2"
+ src: "../../templates/provision/contrail-discovery-conf.j2"
dest: "/etc/contrail/contrail-discovery.conf"
- name: "modify contrail discovery init script"
@@ -142,7 +143,7 @@
- name: "fix up contrail vnc api library config"
template:
- src: "provision/contrail-vnc-api-lib-ini.j2"
+ src: "../../templates/provision/contrail-vnc-api-lib-ini.j2"
dest: "/etc/contrail/vnc_api_lib.ini"
- name: "fix up contrail config nodemgr config"
@@ -154,7 +155,7 @@
- name: "fix up contrail sudoers"
template:
- src: "provision/contrail-sudoers.j2"
+ src: "../../templates/provision/contrail-sudoers.j2"
dest: "/etc/sudoers.d/contrail_sudoers"
mode: 0440
@@ -165,7 +166,7 @@
- name: "fix up contrail plugin for nuetron"
template:
- src: "provision/neutron-contrail-plugin-ini.j2"
+ src: "../../templates/provision/neutron-contrail-plugin-ini.j2"
dest: "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
- name: "modify neutron server configuration"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml
index e36d8f22..3da783e1 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-control.yml
@@ -15,13 +15,15 @@
- name: "modify ifmap server basicauthusers properties for control"
lineinfile:
dest: "/etc/ifmap-server/basicauthusers.properties"
- line: "{{ hostvars[item]['contrail_address' ] }}:{{ hostvars[item]['contrail_address' ] }}"
+# line: "{{ hostvars[item]['contrail_address' ] }}:{{ hostvars[item]['contrail_address' ] }}"
+ line: "{{ haproxy_hosts[item] }}:{{ haproxy_hosts[item] }}"
with_items: groups['opencontrail_control']
- name: "modify ifmap server basicauthusers properties for dns"
lineinfile:
dest: "/etc/ifmap-server/basicauthusers.properties"
- line: "{{ hostvars[item]['contrail_address' ] }}.dns:{{ hostvars[item]['contrail_address' ] }}.dns"
+# line: "{{ hostvars[item]['contrail_address' ] }}.dns:{{ hostvars[item]['contrail_address' ] }}.dns"
+ line: "{{ haproxy_hosts[item] }}.dns:{{ haproxy_hosts[item] }}.dns"
with_items: groups['opencontrail_control']
- name: "node-common"
@@ -29,12 +31,12 @@
- name: "fix up contrail control config"
template:
- src: "provision/contrail-control-conf.j2"
+ src: "../../templates/provision/contrail-control-conf.j2"
dest: "/etc/contrail/contrail-control.conf"
- name: "fix up contrail dns config"
template:
- src: "provision/contrail-dns-conf.j2"
+ src: "../../templates/provision/contrail-dns-conf.j2"
dest: "/etc/contrail/contrail-dns.conf"
- name: "fix up contrail control nodemgr config"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml
index 6807d7d4..b20491b5 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-database.yml
@@ -20,8 +20,10 @@
# sudo: True
lineinfile:
dest: "/etc/hosts"
- regexp: "^{{ contrail_address }}\t{{ ansible_hostname }}( .*)?$"
- line: "{{ contrail_address }}\t{{ ansible_hostname }}\\1"
+# regexp: "^{{ contrail_address }}\t{{ ansible_hostname }}( .*)?$"
+# line: "{{ contrail_address }}\t{{ ansible_hostname }}\\1"
+ regexp: "^{{ contrail_address }}\t{{ inventory_hostname }}( .*)?$"
+ line: "{{ contrail_address }}\t{{ inventory_hostname }}\\1"
backrefs: yes
@@ -46,19 +48,25 @@
- { regexp: "^(#(\\s*)?)?initial_token:", line: "# initial_token:" }
+
- name: "set first database host seed"
# sudo: True
set_fact:
- dbseeds: "{{ hostvars[item.1]['contrail_address'] }}"
- with_indexed_items: groups['database']
+# dbseeds: "{{ hostvars[item.1][ contrail_address ] }}"
+ dbseeds: "{{ haproxy_hosts[item.1] }}"
+ with_indexed_items: groups['opencontrail_database']
when: item.0 == 0
+
- name: "set second database host seed"
# sudo: True
set_fact:
- dbseeds: "{{ dbseeds }},{{ hostvars[item.1]['contrail_address'] }}"
- with_indexed_items: groups['database']
+# dbseeds: "{{ dbseeds }},{{ hostvars[item.1]['contrail_address'] }}"
+ dbseeds: "{{ dbseeds }},{{ haproxy_hosts[item.1] }}"
+ with_indexed_items: groups['opencontrail_database']
when: item.0 == 1
@@ -112,16 +120,17 @@
lineinfile:
dest: "/etc/zookeeper/conf/zoo.cfg"
regexp: "server.{{ item.0 + 1 }}="
- line: "server.{{ item.0 + 1 }}={{ hostvars[item.1]['contrail_address'] }}:2888:3888"
- with_indexed_items: groups['database']
+# line: "server.{{ item.0 + 1 }}={{ hostvars[item.1]['contrail_address'] }}:2888:3888"
+ line: "server.{{ item.0 + 1 }}={{ haproxy_hosts[item.1] }}:2888:3888"
+ with_indexed_items: groups['opencontrail_database']
- name: "set zookeeper unique id"
# sudo: True
template:
- src: "templates/zookeeper-unique-id.j2"
+ src: "../../templates/provision/zookeeper-unique-id.j2"
dest: "/var/lib/zookeeper/myid"
- with_indexed_items: groups['database']
+ with_indexed_items: groups['opencontrail_database']
when: item.1 == inventory_hostname
@@ -135,16 +144,18 @@
- name: "set first zookeeper host address"
# sudo: True
set_fact:
- zkaddrs: "{{ hostvars[item.1]['contrail_address'] }}:2181"
- with_indexed_items: groups['database']
+# zkaddrs: "{{ hostvars[item.1]['contrail_address'] }}:2181"
+ zkaddrs: "{{ haproxy_hosts[item.1] }}:2181"
+ with_indexed_items: groups['opencontrail_database']
when: item.0 == 0
- name: "set second or more zookeeper host addresses"
# sudo: True
set_fact:
- zkaddrs: "{{ zkaddrs }},{{ hostvars[item.1]['contrail_address'] }}:2181"
- with_indexed_items: groups['database']
+# zkaddrs: "{{ zkaddrs }},{{ hostvars[item.1]['contrail_address'] }}:2181"
+ zkaddrs: "{{ zkaddrs }},{{ haproxy_hosts[item.1] }}:2181"
+ with_indexed_items: groups['opencontrail_database']
when: item.0 > 0
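The two set_fact tasks assemble the seed list one host at a time. On a newer Ansible that ships the extract filter, the same value could be built in a single expression; a sketch, assuming haproxy_hosts maps hostnames to addresses as elsewhere in this patch:

    - name: "set database seed list in one step (sketch)"
      set_fact:
        dbseeds: "{{ groups['opencontrail_database'] | map('extract', haproxy_hosts) | join(',') }}"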
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml
index b2785d8f..58037bce 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-rabbitmq.yml
@@ -29,7 +29,8 @@
# sudo: True
lineinfile:
dest: "/etc/hosts"
- line: "{{ hostvars[item]['contrail_address'] }}\t{{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}-ctrl"
+# line: "{{ hostvars[item]['contrail_address'] }}\t{{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'] }}-ctrl"
+ line: "{{ contrail_address }}\t{{ inventory_hostname }} {{ inventory_hostname }}-ctrl"
with_items: groups['opencontrail_config']
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml
index 87810732..78b01e3f 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-toragent.yml
@@ -69,9 +69,9 @@
run_once: yes
- name: "add tor agent to contrail"
- shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ ansible_hostname }}-{{ item.ansible_facts.toragent_index }} --host_ip {{ contrail_address }} --router_type tor-agent"
+ shell: "python /opt/contrail/utils/provision_vrouter.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --host_name {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --host_ip {{ contrail_address }} --router_type tor-agent"
with_items: contrail_toragent_list.results
- name: "add device to contrail"
- shell: "python /opt/contrail/utils/provision_physical_device.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --device_name {{ item.ansible_facts.toragent_params.name }} --vendor_name {{ item.ansible_facts.toragent_params.vendor_name }} --product_name {{ item.ansible_facts.toragent_params.product_name }} --device_mgmt_ip {{ item.ansible_facts.toragent_params.address }} --device_tunnel_ip {{ item.ansible_facts.toragent_params.tunnel_address }} --device_tor_agent {{ ansible_hostname }}-{{ item.ansible_facts.toragent_index }} --device_tsn {{ ansible_hostname }}"
+ shell: "python /opt/contrail/utils/provision_physical_device.py --api_server_ip {{ contrail_haproxy_address }} --admin_user {{ contrail_admin_user }} --admin_password {{ contrail_admin_password }} --admin_tenant_name admin --openstack_ip {{ contrail_keystone_address }} --oper add --device_name {{ item.ansible_facts.toragent_params.name }} --vendor_name {{ item.ansible_facts.toragent_params.vendor_name }} --product_name {{ item.ansible_facts.toragent_params.product_name }} --device_mgmt_ip {{ item.ansible_facts.toragent_params.address }} --device_tunnel_ip {{ item.ansible_facts.toragent_params.tunnel_address }} --device_tor_agent {{ inventory_hostname }}-{{ item.ansible_facts.toragent_index }} --device_tsn {{ inventory_hostname }}"
with_items: contrail_toragent_list.results
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml
index 058be18a..33ad6507 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-tsn.yml
@@ -39,7 +39,7 @@
- name: "fix up contrail vrouter agent config"
template:
- src: "provision/contrail-vrouter-agent-conf.j2"
+ src: "../../templates/provision/contrail-vrouter-agent-conf.j2"
dest: "/etc/contrail/contrail-vrouter-agent.conf"
- name: "delete lines for contrail interface"
diff --git a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml
index eb6301b2..515b10e9 100755
--- a/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/tasks/provision/provision-webui.yml
@@ -16,13 +16,15 @@
- name: "set first cassandra host address"
set_fact:
- cassandra_addrs: "'{{ hostvars[item.1]['contrail_address'] }}'"
+# cassandra_addrs: "'{{ hostvars[item.1]['contrail_address'] }}'"
+ cassandra_addrs: "'{{ haproxy_hosts[item] }}'"
with_indexed_items: groups['opencontrail_database']
when: item.0 == 0
- name: "set second or more cassandra host addresses"
set_fact:
- cassandra_addrs: "{{ cassandra_addrs }}, '{{ hostvars[item.1]['contrail_address'] }}'"
+# cassandra_addrs: "{{ cassandra_addrs }}, '{{ hostvars[item.1]['contrail_address'] }}'"
+ cassandra_addrs: "{{ cassandra_addrs }}, '{{ haproxy_hosts[item] }}'"
with_indexed_items: groups['opencontrail_database']
when: item.0 > 0
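with_indexed_items hands the task each element as an (index, value) pair, so item.0 carries the position and item.1 the hostname used to index haproxy_hosts; a minimal sketch:

    - debug: msg="{{ item.0 }} -> {{ item.1 }}"   # prints "0 -> host1", "1 -> host2", ...
      with_indexed_items: groups['opencontrail_database']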
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2
index 18192f19..6e2bcce3 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-analytics-api-conf.j2
@@ -1,8 +1,10 @@
[DEFAULTS]
host_ip = {{ contrail_address }}
-rest_api_ip = 0.0.0.0
+rest_api_ip = {{ contrail_haproxy_address }}
rest_api_port = 9081
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+#cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ ip_settings[cur_host]['mgmt']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+# note: the commented variant above also works; the haproxy_hosts form below is preferred, and the old form is kept here for reference
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
collectors = {{ contrail_address }}:8086
http_server_port = 8090
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2
index 1eefacfb..e4acf3ea 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-api-conf.j2
@@ -1,13 +1,13 @@
[DEFAULTS]
-listen_ip_addr = 0.0.0.0
+listen_ip_addr = {{ contrail_haproxy_address }}
listen_port = 8082
-ifmap_server_ip = {{ contrail_address }}
+ifmap_server_ip = {{ contrail_haproxy_address }}
ifmap_server_port = 8443
ifmap_username = api-server
ifmap_password = api-server
-zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
+zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
disc_server_ip = {{ contrail_haproxy_address }}
disc_server_port = 5998
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2
index e6242346..da5cf2ab 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-collector-conf.j2
@@ -13,7 +13,7 @@ analytics_flow_ttl = -1
# IP address and port to be used to connect to cassandra.
# Multiple IP:port strings separated by space can be provided
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
# IP address and port to be used to connect to kafka.
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2
index 77bcc95f..7ab29f6f 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-device-manager-conf.j2
@@ -1,9 +1,9 @@
[DEFAULTS]
api_server_ip = {{ contrail_haproxy_address }}
api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
+zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
disc_server_ip = {{ contrail_haproxy_address }}
disc_server_port = 5998
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2
index 84e6317f..509b3569 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-discovery-conf.j2
@@ -1,10 +1,10 @@
[DEFAULTS]
-listen_ip_addr = 0.0.0.0
+listen_ip_addr = {{ contrail_haproxy_address }}
listen_port = 5998
-zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}{% if not loop.last %}, {% endif %}{% endfor %}
+zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}{% if not loop.last %}, {% endif %}{% endfor %}
zk_server_port = 2181
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
log_file = /var/log/contrail/contrail-discovery.log
log_level = SYS_NOTICE
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2
index 0a2ab433..9d415563 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-dns-conf.j2
@@ -1,6 +1,6 @@
[DEFAULT]
hostip = {{ contrail_address }}
-hostname = {{ ansible_hostname }}
+hostname = {{ inventory_hostname }}
log_file = /var/log/contrail/contrail-dns.log
log_level = SYS_NOTICE
log_local = 1
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2
index e051b7ec..7564d26d 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-query-engine-conf.j2
@@ -1,8 +1,9 @@
[DEFAULT]
hostip = {{ contrail_address }}
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+#cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ ip_settings[cur_host]['mgmt']['ip'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
-collectors = {{ contrail_address }}:8086
+collectors = {{ contrail_haproxy_address }}:8086
http_server_port = 8091
log_file = /var/log/contrail/contrail-query-engine.log
log_level = SYS_NOTICE
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2
index 2bb4ab79..cf5dead3 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-schema-conf.j2
@@ -1,13 +1,15 @@
[DEFAULTS]
-ifmap_server_ip = {{ hostvars[groups['opencontrail_config'][0]]['contrail_address'] }}
+#ifmap_server_ip = {{ ip_settings[haproxy_hosts.keys()[0]]['mgmt']['ip'] }}
+ifmap_server_ip = {{ haproxy_hosts.values()[0] }}
ifmap_server_port = 8443
ifmap_username = schema-transformer
ifmap_password = schema-transformer
-api_server_ip = {{ hostvars[groups['opencontrail_config'][0]]['contrail_address'] }}
+#api_server_ip = {{ ip_settings[haproxy_hosts.keys()[0]]['mgmt']['ip'] }}
+api_server_ip = {{ haproxy_hosts.values()[0] }}
api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
+zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
disc_server_ip = {{ contrail_haproxy_address }}
disc_server_port = 5998
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2
index 4b4221d7..6ad5ec2d 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-svc-monitor-conf.j2
@@ -1,13 +1,13 @@
[DEFAULTS]
-ifmap_server_ip = {{ contrail_address }}
+ifmap_server_ip = {{ contrail_haproxy_address }}
ifmap_server_port = 8443
ifmap_username = svc-monitor
ifmap_password = svc-monitor
api_server_ip = {{ contrail_haproxy_address }}
api_server_port = 8082
-zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
+zk_server_ip = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:2181{% if not loop.last %}, {% endif %}{% endfor %}
-cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ hostvars[cur_host]['contrail_address'] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
+cassandra_server_list = {% for cur_host in groups['opencontrail_database'] %}{{ haproxy_hosts[cur_host] }}:9160{% if not loop.last %} {% endif %}{% endfor %}
disc_server_ip = {{ contrail_haproxy_address }}
disc_server_port = 5998
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2
index fb483c3e..8d336e52 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/contrail-tor-agent-conf.j2
@@ -78,7 +78,7 @@ server = {{ contrail_haproxy_address }}
[NETWORKS]
# control-channel IP address used by WEB-UI to connect to vnswad to fetch
# required information (Optional)
-control_network_ip = {{ contrail_address }}
+control_network_ip = {{ contrail_haproxy_address }}
[TOR]
# IP address of the TOR to manage
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2
index 6aa4d06e..d7691b6b 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/haproxy-contrail-cfg.j2
@@ -1,66 +1,78 @@
#contrail-marker-start
listen contrail-stats
- bind *:5937
+# bind *:5937
+ bind {{ internal_vip.ip }}:5937
+ bind {{ public_vip.ip }}:5937
mode http
stats enable
stats uri /
stats auth haproxy:contrail123
-listen neutron-server
- bind *:9696
- balance roundrobin
- option nolinger
-{% for cur_host in groups['opencontrail_config'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }}:9697 check inter 2000 rise 2 fall 3
-{% endfor %}
+# compass already binds neutron-server, so this block is disabled here
+#listen neutron-server
+# bind *:9696
+# balance roundrobin
+# option nolinger
+#{% for host,ip in haproxy_hosts.items() %}
+# server {{ host }} {{ ip }}:9697 weight 1 check inter 2000 rise 2 fall 3
+#{% endfor %}
+
+
listen contrail-api
- bind *:8082
+# bind *:8082
+ bind {{ internal_vip.ip }}:8082
+ bind {{ public_vip.ip }}:8082
balance roundrobin
option nolinger
timeout client 3m
timeout server 3m
-{% for cur_host in groups['opencontrail_config'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }}:9100 check inter 2000 rise 2 fall 3
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9100 weight 1 check inter 2000 rise 2 fall 3
{% endfor %}
+
+
listen contrail-discovery
- bind *:5998
+# bind *:5998
+ bind {{ internal_vip.ip }}:5998
+ bind {{ public_vip.ip }}:5998
balance roundrobin
option nolinger
-{% for cur_host in groups['opencontrail_config'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }}:9110 check inter 2000 rise 2 fall 3
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9110 weight 1 check inter 2000 rise 2 fall 3
{% endfor %}
+
+
listen contrail-analytics-api
- bind *:8081
+# bind *:8081
+ bind {{ internal_vip.ip }}:8081
+ bind {{ public_vip.ip }}:8081
balance roundrobin
option nolinger
option tcp-check
tcp-check connect port 6379
default-server error-limit 1 on-error mark-down
-{% for cur_host in groups['opencontrail_collector'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }}:9081 check inter 2000 rise 2 fall 3
+{% for host,ip in haproxy_hosts.items() %}
+ server {{ host }} {{ ip }}:9081 weight 1 check inter 2000 rise 2 fall 5
{% endfor %}
-{% if contrail_tor_agents is defined %}listen contrail-tor-agent
- bind {% for cur_agent in contrail_tor_agents %}*:{{ cur_agent['ovs_port'] }}{% if not loop.last %},{% endif %}{% endfor %}
- mode tcp
- balance leastconn
- option tcplog
- option tcpka
-{% for cur_host in groups['opencontrail_tsn'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }} check inter 2000
-{% endfor %}{% endif %}
-
-listen rabbitmq
- bind *:5673
- mode tcp
- balance roundrobin
- maxconn 10000
- option tcplog
- option tcpka
- option redispatch
- timeout client 48h
- timeout server 48h
-{% for cur_host in groups['opencontrail_config'] %} server {{ hostvars[cur_host]['contrail_address'] }} {{ hostvars[cur_host]['contrail_address'] }}:5672 check inter 2000 rise 2 fall 3 weight 1 maxconn 500
+# compass doesn't use haproxy for rabbitmq; it uses cluster mode instead
+#listen rabbitmq
+# bind *:5673
+# mode tcp
+# balance roundrobin
+# maxconn 10000
+# option tcplog
+# option tcpka
+# option redispatch
+# timeout client 48h
+# timeout server 48h
+#{% for host,ip in haproxy_hosts.items() %}
+# server {{ host }} {{ ip }}:5672 weight 1 check inter 2000 rise 2 fall 5
+#{% endfor %}
#contrail-marker-end
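With the VIP binds in place, the contrail-api block renders roughly as below; the internal VIP matches deploy/conf/cluster.conf (10.1.0.222), while the public VIP and the two backend hosts are illustrative values only:

    listen contrail-api
        bind 10.1.0.222:8082
        bind 192.168.200.222:8082
        balance roundrobin
        option nolinger
        timeout client 3m
        timeout server 3m
        server host1 10.1.0.50:9100 weight 1 check inter 2000 rise 2 fall 3
        server host2 10.1.0.51:9100 weight 1 check inter 2000 rise 2 fall 3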
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2
index 1b3e60f7..6728eddb 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/rabbitmq-env-conf.j2
@@ -1,2 +1,2 @@
NODE_IP_ADDRESS={{ contrail_address }}
-NODENAME=rabbit@{{ ansible_hostname }}-ctrl
+NODENAME=rabbit@{{ inventory_hostname }}-ctrl
diff --git a/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2 b/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2
index 7eee51ba..436967b7 100755
--- a/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2
+++ b/deploy/adapters/ansible/roles/open-contrail/templates/provision/vrouter-nodemgr-param.j2
@@ -1 +1 @@
-DISCOVERY={{ hostvars[groups['opencontrail_config'][0]]['contrail_address'] }}
+DISCOVERY={{ haproxy_hosts.values()[0] }}
diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml b/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml
index c64f238f..62596917 100755
--- a/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/vars/Debian.yml
@@ -1,11 +1,11 @@
---
-package: "contrail-install-packages_2.21-102~juno_all.deb"
+package: "contrail-install-packages_2.21-102-ubuntu-14-04juno_all.deb"
-common_packages:
+common_package:
- contrail-setup
-kernel_packages:
+kernel_package:
- linux-headers-3.13.0-40
- linux-headers-3.13.0-40-generic
- linux-image-3.13.0-40-generic
@@ -13,28 +13,28 @@ kernel_packages:
kernel_required: "3.13.0-40-generic"
-database_packages:
+database_package:
- contrail-openstack-database
-config_packages:
+config_package:
- contrail-openstack-config
-control_packages:
+control_package:
- contrail-openstack-control
-collector_packages:
+collector_package:
- contrail-openstack-analytics
-webui_packages:
+webui_package:
- contrail-openstack-webui
-vrouter_packages:
+vrouter_package:
- contrail-vrouter-3.13.0-40-generic
-dkms_packages:
+dkms_package:
- contrail-vrouter-dkms
-compute_packages:
+compute_package:
- contrail-vrouter-common
- contrail-nova-vif
diff --git a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml b/deploy/adapters/ansible/roles/open-contrail/vars/main.yml
index 015c99b7..e19b1fbf 100755
--- a/deploy/adapters/ansible/roles/open-contrail/vars/main.yml
+++ b/deploy/adapters/ansible/roles/open-contrail/vars/main.yml
@@ -1,15 +1,27 @@
---
#package: "contrail-install-packages_2.21-102~juno_all.deb" # mv to {os}.yml
kernel_install: no
-ansible_ssh_user: "root"
-ansible_ssh_pass: "root"
+#ansible_ssh_user: "root"
+#ansible_ssh_pass: "root"
contrail_keystone_address: "{{ internal_vip.ip }}"
-contrail_admin_user: "keystone"
-contrail_admin_password: "{{ keystone_PASS }}"
+contrail_admin_user: "admin"
+contrail_admin_password: "console"
+
+
+# network info adapter for compass
+contrail_device: "mgmt"
+contrail_address: "{{ internal_ip }}"
+contrail_netmask: "255.255.255.0"
+#contrail_gateway: "10.84.50.254"
+contrail_gateway:
+#contrail_mgmt_address: "172.27.113.91"
+
+
contrail_keepalived: no
-contrail_haproxy_address: "10.0.0.22" # 10.0.0.80
+#contrail_haproxy_address: "10.0.0.22" # 10.0.0.80
+contrail_haproxy_address: "{{ internal_vip.ip }}"
contrail_netmask: "255.255.255.0"
contrail_prefixlen: "24"
contrail_gateway: "10.0.0.1"
@@ -19,7 +31,7 @@ contrail_router_asn: "64512"
### Modify when need openstack provisioning
keystone_provision: no
install_nova: no
-rabbit_password: "password"
+rabbit_password: "console"
contrail_tor_agents:
- name: "test01"
@@ -43,9 +55,9 @@ contrail_tor_agents:
# adapter for compass
-kernel_packages_noarch: []
+kernel_package_noarch: []
-compute_packages_noarch: []
+compute_package_noarch: []
# network info adapter for compass
contrail_device: "mgmt"
diff --git a/deploy/adapters/ansible/roles/secgroup/tasks/main.yml b/deploy/adapters/ansible/roles/secgroup/tasks/main.yml
index c26af4b0..6419208b 100644
--- a/deploy/adapters/ansible/roles/secgroup/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/secgroup/tasks/main.yml
@@ -8,3 +8,5 @@
- include: secgroup.yml
when: '{{ enable_secgroup }} == False'
tags: secgroup
+
+- meta: flush_handlers
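flush_handlers makes any handlers notified up to this point run immediately rather than at the end of the play; a minimal sketch with a hypothetical handler name:

    - name: update foo config                 # hypothetical task
      template: src=foo.j2 dest=/etc/foo.conf
      notify: restart foo
    - meta: flush_handlers                    # "restart foo" fires here, not at play end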
diff --git a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
index 818ac57a..51821b09 100644
--- a/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
+++ b/deploy/adapters/ansible/roles/setup-network/tasks/main.yml
@@ -1,4 +1,12 @@
---
+- name: disable NetworkManager
+ service: name=NetworkManager state=stopped enabled=no
+ when: ansible_os_family == 'RedHat'
+
+- name: enable network service
+ service: name=network state=started enabled=yes
+ when: ansible_os_family == 'RedHat'
+
- name: add ovs bridge
openvswitch_bridge: bridge={{ item["name"] }} state=present
with_items: "{{ network_cfg['provider_net_mappings'] }}"
@@ -42,3 +50,5 @@
- name: add to boot scripts
service: name=net_init enabled=yes
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/storage/files/create_img.sh b/deploy/adapters/ansible/roles/storage/files/create_img.sh
new file mode 100755
index 00000000..13565bdc
--- /dev/null
+++ b/deploy/adapters/ansible/roles/storage/files/create_img.sh
@@ -0,0 +1,4 @@
+seek_num=`echo $1 | sed -e 's/.* //g'`
+if [ ! -f /var/storage.img ]; then
+ dd if=/dev/zero of=/var/storage.img bs=1 count=0 seek=$seek_num
+fi
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh b/deploy/adapters/ansible/roles/storage/files/get_var_size.sh
index 55041c6e..6f690375 100644..100755
--- a/deploy/adapters/ansible/roles/cinder-volume/files/get_var_size.sh
+++ b/deploy/adapters/ansible/roles/storage/files/get_var_size.sh
@@ -1,5 +1,5 @@
size=`df /var | awk '$3 ~ /[0-9]+/ { print $4 }'`;
-if [[ $size -gt 2000000000 ]]; then
+if [ $size -gt 2000000000 ]; then
echo -n 2000000000000;
else
echo -n $((size * 1000 / 512 * 512));
diff --git a/deploy/adapters/ansible/roles/cinder-volume/files/loop.yml b/deploy/adapters/ansible/roles/storage/files/loop.yml
index e872652a..e872652a 100644..100755
--- a/deploy/adapters/ansible/roles/cinder-volume/files/loop.yml
+++ b/deploy/adapters/ansible/roles/storage/files/loop.yml
diff --git a/deploy/adapters/ansible/roles/storage/files/losetup.sh b/deploy/adapters/ansible/roles/storage/files/losetup.sh
new file mode 100755
index 00000000..3e95069f
--- /dev/null
+++ b/deploy/adapters/ansible/roles/storage/files/losetup.sh
@@ -0,0 +1,7 @@
+loop_dev=`losetup -a |grep "/var/storage.img"|awk -F':' '{print $1}'`
+if [ -z $loop_dev ]; then
+ losetup -f --show /var/storage.img
+else
+ echo $loop_dev
+fi
+
diff --git a/deploy/adapters/ansible/roles/storage/tasks/loop.yml b/deploy/adapters/ansible/roles/storage/tasks/loop.yml
new file mode 100755
index 00000000..a16d2358
--- /dev/null
+++ b/deploy/adapters/ansible/roles/storage/tasks/loop.yml
@@ -0,0 +1,23 @@
+---
+
+- name: get available /var partition size
+ script: get_var_size.sh
+ register: part_size
+
+- name: create image file if it does not exist
+ script: create_img.sh \"{{ part_size.stdout }}\"
+
+- name: do a losetup on storage volumes
+ script: losetup.sh
+ register: loop_device
+
+- name: debug loop device
+ debug: msg={{ loop_device.stdout }}
+
+- name: get device
+ shell: echo '{{ loop_device.stdout }}' | sed ':a;N;$!ba;s/.*\n\(\/dev\)/\1/g'
+ register: loop_device_filtered
+
+- name: create physical and group volumes
+ lvg: vg=storage-volumes pvs={{ loop_device_filtered.stdout }}
+ vg_options=--force
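The sed in the "get device" task slurps the whole losetup.sh output and keeps only the text starting at the last line that begins with /dev, which guards against warnings printed ahead of the device path; a sketch:

    printf 'WARNING: some noise\n/dev/loop0\n' | sed ':a;N;$!ba;s/.*\n\(\/dev\)/\1/g'
    # -> /dev/loop0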
diff --git a/deploy/adapters/ansible/roles/storage/tasks/main.yml b/deploy/adapters/ansible/roles/storage/tasks/main.yml
new file mode 100755
index 00000000..4185dc91
--- /dev/null
+++ b/deploy/adapters/ansible/roles/storage/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: check if physical device exists
+ stat: path={{ physical_device }}
+ register: status
+ tags:
+ - storage
+
+- name: load loop.yml
+ include: loop.yml
+ when: status.stat.exists == False or status.stat.isblk == False
+ tags:
+ - storage
+
+- name: load real.yml
+ include: real.yml
+ when: status.stat.exists == True and status.stat.isblk == True
+ tags:
+ - storage
+
+- meta: flush_handlers
diff --git a/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml b/deploy/adapters/ansible/roles/storage/tasks/real.yml
index 19ef828b..fd3351c0 100644..100755
--- a/deploy/adapters/ansible/roles/cinder-volume/tasks/real.yml
+++ b/deploy/adapters/ansible/roles/storage/tasks/real.yml
@@ -1,10 +1,8 @@
---
-- include_vars: "{{ ansible_os_family }}.yml"
-
- name: destroy GPT label
shell: dd if=/dev/urandom of={{ physical_device }} bs=4M count=1
ignore_errors: True
- name: create physical and group volumes
- lvg: vg=cinder-volumes pvs={{ physical_device }}
+ lvg: vg=storage-volumes pvs={{ physical_device }}
vg_options=--force
diff --git a/deploy/client.py b/deploy/client.py
index 15f1ba8d..b9cfd7fe 100644
--- a/deploy/client.py
+++ b/deploy/client.py
@@ -228,6 +228,9 @@ opts = [
cfg.StrOpt('deploy_type',
help='deploy type',
default='virtual'),
+ cfg.StrOpt('deploy_flag',
+ help='deploy flag',
+ default='deploy'),
]
CONF.register_cli_opts(opts)
@@ -279,9 +282,9 @@ class CompassClient(object):
def get_machines(self):
status, resp = self.client.list_machines()
- LOG.info(
- 'get all machines status: %s, resp: %s', status, resp)
if not self.is_ok(status):
+ LOG.error(
+ 'get all machines status: %s, resp: %s', status, resp)
raise RuntimeError('failed to get machines')
machines_to_add = list(set([
@@ -289,12 +292,21 @@ class CompassClient(object):
if machine
]))
- LOG.info('machines to add: %s', machines_to_add)
machines_db = [str(m["mac"]) for m in resp]
- LOG.info('machines in db: %s', machines_db)
- assert(set(machines_db) == set(machines_to_add))
+ LOG.info('machines in db: %s\n to add: %s', machines_db, machines_to_add)
+ if not set(machines_to_add).issubset(set(machines_db)):
+ raise RuntimeError('unidentified machine to add')
+
+ return [m["id"] for m in resp if str(m["mac"]) in machines_to_add]
+
+ def list_clusters(self):
+ status, resp = self.client.list_clusters(name=CONF.cluster_name)
+ if not self.is_ok(status) or not resp:
+ raise RuntimeError('failed to list cluster')
+
+ cluster = resp[0]
- return [m["id"] for m in resp]
+ return cluster['id']
def get_adapter(self):
"""get adapter."""
@@ -307,19 +319,16 @@ class CompassClient(object):
if not self.is_ok(status) or not resp:
raise RuntimeError('failed to get adapters')
- adapter_name = CONF.adapter_name
os_re = re.compile(CONF.adapter_os_pattern)
flavor_re = re.compile(CONF.adapter_flavor_pattern)
adapter_id = None
os_id = None
- distributed_system_id = None
flavor_id = None
adapter = None
adapter = resp[0]
adapter_id = adapter['id']
- distributed_system_id = adapter['distributed_system_id']
for supported_os in adapter['supported_oses']:
if not os_re or os_re.match(supported_os['name']):
os_id = supported_os['os_id']
@@ -332,7 +341,7 @@ class CompassClient(object):
break
assert(os_id and flavor_id)
- return (adapter_id, os_id, distributed_system_id, flavor_id)
+ return (adapter_id, os_id, flavor_id)
def add_subnets(self):
subnets = [
@@ -809,6 +818,18 @@ class CompassClient(object):
if not self.is_ok(status):
raise RuntimeError("deploy cluster failed")
+ def redeploy_clusters(self, cluster_id):
+ status, response = self.client.redeploy_cluster(
+ cluster_id
+ )
+
+ if not self.is_ok(status):
+ LOG.info(
+ 'deploy cluster %s status %s: %s',
+ cluster_id, status, response
+ )
+ raise RuntimeError("redeploy cluster failed")
+
def get_installing_progress(self, cluster_id):
"""get intalling progress."""
action_timeout = time.time() + 60 * float(CONF.action_timeout)
@@ -865,21 +886,22 @@ class CompassClient(object):
raise Exception(msg)
-def main():
+
+def deploy():
client = CompassClient()
machines = client.get_machines()
LOG.info('machines are %s', machines)
client.add_subnets()
- adapter_id, os_id, distributed_system_id, flavor_id = client.get_adapter()
+ adapter_id, os_id, flavor_id = client.get_adapter()
cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
client.add_cluster_hosts(cluster_id, machines)
client.set_host_networking()
client.set_cluster_os_config(cluster_id)
- if distributed_system_id:
+ if flavor_id:
client.set_cluster_package_config(cluster_id)
client.set_all_hosts_roles(cluster_id)
@@ -888,6 +910,23 @@ def main():
client.get_installing_progress(cluster_id)
client.check_dashboard_links(cluster_id)
+def redeploy():
+ client = CompassClient()
+
+ cluster_id = client.list_clusters()
+
+ client.redeploy_clusters(cluster_id)
+
+ client.get_installing_progress(cluster_id)
+ client.check_dashboard_links(cluster_id)
+
+def main():
+ if CONF.deploy_flag == "redeploy":
+ redeploy()
+ else:
+ deploy()
+
+
if __name__ == "__main__":
CONF(args=sys.argv[1:])
main()
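The new deploy_flag option turns main() into a dispatcher between deploy() and redeploy(). A redeploy run would be invoked roughly like this (deploy_flag comes from this patch; the other flags are illustrative, mirroring deploy_host.sh and base.conf):

    python deploy/client.py --deploy_flag=redeploy \
        --compass_server="${HTTP_SERVER_URL}" --cluster_name=opnfv2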
diff --git a/deploy/compass_vm.sh b/deploy/compass_vm.sh
index 958bf33b..196df4a3 100644..100755
--- a/deploy/compass_vm.sh
+++ b/deploy/compass_vm.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
compass_vm_dir=$WORK_DIR/vm/compass
rsa_file=$compass_vm_dir/boot.rsa
ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file"
@@ -14,14 +15,26 @@ function tear_down_compass() {
}
function install_compass_core() {
+ install_compass "compass_nodocker.yml"
+}
+
+function set_compass_machine() {
+ local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
+
+ sed -i -e '/test: true/d' -e '/pxe_boot_macs/d' $config_file
+ echo "test: true" >> $config_file
+ echo "pxe_boot_macs: [${machines}]" >> $config_file
+
+ install_compass "compass_machine.yml"
+}
+
+function install_compass() {
local inventory_file=$compass_vm_dir/inventory.file
- log_info "install_compass_core enter"
sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" $WORK_DIR/installer/compass-install/install/group_vars/all
echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > $inventory_file
- PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' python /usr/local/bin/ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml
+ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' python /usr/local/bin/ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/$1
exit_status=$?
rm $inventory_file
- log_info "install_compass_core exit"
if [[ $exit_status != 0 ]];then
/bin/false
fi
@@ -67,6 +80,7 @@ function launch_compass() {
chmod 755 -R $new_mnt
cp $COMPASS_DIR/util/isolinux.cfg $new_mnt/isolinux/ -f
+ cp $COMPASS_DIR/util/ks.cfg $new_mnt/isolinux/ -f
sed -i -e "s/REPLACE_MGMT_IP/$MGMT_IP/g" \
-e "s/REPLACE_MGMT_NETMASK/$MGMT_MASK/g" \
diff --git a/deploy/conf/base.conf b/deploy/conf/base.conf
index 45970a05..d7d2f4ef 100644
--- a/deploy/conf/base.conf
+++ b/deploy/conf/base.conf
@@ -12,7 +12,7 @@ export MGMT_IP_END=${MGMT_IP_END:-192.168.200.254}
export EXTERNAL_NIC=${EXTERNAL_NIC:-eth0}
export CLUSTER_NAME="opnfv2"
export DOMAIN="ods.com"
-export PARTITIONS="/=70%,/home=5%,/tmp=5%,/var=20%"
+export PARTITIONS="/=30%,/home=5%,/tmp=5%,/var=60%"
export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'}
@@ -59,6 +59,6 @@ export GATEWAY="10.1.0.1"
export SERVER_CREDENTIAL="root=root"
export LOCAL_REPO_URL=""
export OS_CONFIG_FILENAME=""
-export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service,heat:heat=heat_secret"
-export CONSOLE_CREDENTIALS="admin:console=console,compute:console=console,dashboard:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console,heat:heat=heat_db_secret"
+export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,network:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service,heat:heat=heat_db_secret"
+export CONSOLE_CREDENTIALS="admin:console=console,demo:console=console,compute:console=console,dashboard:console=console,identity:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console,heat:heat=heat_secret"
export PACKAGE_CONFIG_FILENAME=""
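Both credential lists use the same comma-separated service:user=password triple format. A minimal sketch of how one entry splits apart with plain parameter expansion (the entry value is taken from the line above; the variable names are illustrative):

    entry="heat:heat=heat_db_secret"
    service=${entry%%:*}      # -> heat
    userpass=${entry#*:}      # -> heat=heat_db_secret
    user=${userpass%%=*}      # -> heat
    password=${userpass#*=}   # -> heat_db_secret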
diff --git a/deploy/conf/cluster.conf b/deploy/conf/cluster.conf
index 200fae65..f741895b 100644
--- a/deploy/conf/cluster.conf
+++ b/deploy/conf/cluster.conf
@@ -1,18 +1,19 @@
+# for Operating System
export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)ubuntu-14\.04\.3.*'}
-export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
+#export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)CentOS-7.*15.*'}
+
+# trusty/centos7
+export OS_VERSION=${OS_VERSION:-"trusty"}
-export REPO_NAME=${REPO_NAME:-"trusty-juno-ppa"}
-export ADAPTER_NAME="openstack_juno"
-export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes-juno"
-#export REPO_NAME=${REPO_NAME:-"trusty-liberty-ppa"}
-#export ADAPTER_NAME="openstack_liberty"
-#export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes-liberty"
+# liberty/kilo/juno
+export OPENSTACK_VERSION=${OPENSTACK_VERSION:-"liberty"}
+
+# don't touch this
+export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
-#export ADAPTER_OS_PATTERN=${ADAPTER_OS_PATTERN:-'(?i)CentOS-7.*1503-01.*'}
-#export REPO_NAME=${REPO_NAME:-"centos7-kilo-ppa"}
-#export ADAPTER_NAME="openstack_kilo"
-#export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
-#export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes-kilo"
+export REPO_NAME="$OS_VERSION-$OPENSTACK_VERSION-ppa"
+export ADAPTER_NAME="openstack_$OPENSTACK_VERSION"
+export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes-$OPENSTACK_VERSION"
export DEFAULT_ROLES=""
export VIP="10.1.0.222"
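With the two inputs above, the three derived names are pure string composition: the defaults (trusty + liberty) expand to trusty-liberty-ppa, openstack_liberty and HA-ansible-multinodes-liberty. Selecting another combination is then just an environment override at invocation time, assuming deploy.sh sources this file after reading the environment:

    OS_VERSION=trusty OPENSTACK_VERSION=juno ./deploy.sh
    # -> REPO_NAME=trusty-juno-ppa, ADAPTER_NAME=openstack_juno,
    #    ADAPTER_FLAVOR_PATTERN=HA-ansible-multinodes-juno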
diff --git a/deploy/conf/virtual.conf b/deploy/conf/virtual.conf
index 0dbdb28b..611aca76 100644
--- a/deploy/conf/virtual.conf
+++ b/deploy/conf/virtual.conf
@@ -1,7 +1,7 @@
export VIRT_NUMBER=5
export VIRT_CPUS=4
export VIRT_MEM=16384
-export VIRT_DISK=30G
+export VIRT_DISK=50G
export SWITCH_IPS="1.1.1.1"
export SWITCH_CREDENTIAL="version=2c,community=public"
diff --git a/deploy/deploy_host.sh b/deploy/deploy_host.sh
index e7dedb89..b87a74d3 100644..100755
--- a/deploy/deploy_host.sh
+++ b/deploy/deploy_host.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
function deploy_host(){
ssh $ssh_args root@${MGMT_IP} mkdir -p /opt/compass/bin/ansible_callbacks
@@ -5,7 +6,13 @@ function deploy_host(){
reboot_hosts
- python ${COMPASS_DIR}/deploy/client.py --compass_server="${HTTP_SERVER_URL}" \
+ if [[ "$REDEPLOY_HOST" == true ]]; then
+ deploy_flag="redeploy"
+ else
+ deploy_flag="deploy"
+ fi
+
+ python ${COMPASS_DIR}/deploy/client.py --deploy_flag=$deploy_flag --compass_server="${HTTP_SERVER_URL}" \
--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
--hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
diff --git a/deploy/deploy_parameter.sh b/deploy/deploy_parameter.sh
index 8b2e7695..dfecbe33 100755
--- a/deploy/deploy_parameter.sh
+++ b/deploy/deploy_parameter.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
set -x
function get_option_name_list()
{
diff --git a/deploy/host_baremetal.sh b/deploy/host_baremetal.sh
index 9e25c98d..2281ffbf 100644..100755
--- a/deploy/host_baremetal.sh
+++ b/deploy/host_baremetal.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
function reboot_hosts() {
if [ -z $POWER_MANAGE ]; then
return
@@ -6,10 +7,6 @@ function reboot_hosts() {
}
function get_host_macs() {
- local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
- echo "test: true" >> $config_file
- machine=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
- echo "pxe_boot_macs: [$machine]" >> $config_file
-
- echo $machine
+ machines=`echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'`
+ echo $machines
}
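The sed pipeline wraps each MAC address in single quotes so the list drops straight into a YAML array. A worked example of the transformation:

    HOST_MACS="00:00:3d:a4:ee:4c,00:00:63:35:3c:2b"
    echo $HOST_MACS | sed -e 's/,/'\',\''/g' -e 's/^/'\''/g' -e 's/$/'\''/g'
    # prints: '00:00:3d:a4:ee:4c','00:00:63:35:3c:2b'
    # which set_compass_machine later embeds as: pxe_boot_macs: [...]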
diff --git a/deploy/host_virtual.sh b/deploy/host_virtual.sh
index 4cb33613..8e93b9b3 100644..100755
--- a/deploy/host_virtual.sh
+++ b/deploy/host_virtual.sh
@@ -1,10 +1,14 @@
+#!/bin/bash
host_vm_dir=$WORK_DIR/vm
function tear_down_machines() {
+ old_ifs=$IFS
+ IFS=,
for i in $HOSTNAMES; do
sudo virsh destroy $i
sudo virsh undefine $i
rm -rf $host_vm_dir/$i
done
+ IFS=$old_ifs
}
function reboot_hosts() {
@@ -14,7 +18,6 @@ function reboot_hosts() {
function launch_host_vms() {
old_ifs=$IFS
IFS=,
- tear_down_machines
#function_bod
mac_array=($machines)
log_info "bringing up pxe boot vms"
@@ -44,16 +47,18 @@ function launch_host_vms() {
}
function get_host_macs() {
- local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
local machines=
- chmod +x $mac_generator
- mac_array=`$mac_generator $VIRT_NUMBER`
- machines=`echo $mac_array|sed 's/ /,/g'`
+ if [[ $REDEPLOY_HOST == "true" ]]; then
+ mac_array=`cat $WORK_DIR/switch_machines`
+ else
+ chmod +x $mac_generator
+ mac_array=`$mac_generator $VIRT_NUMBER`
+ echo $mac_array > $WORK_DIR/switch_machines
+ fi
- echo "test: true" >> $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
+ machines=`echo $mac_array|sed 's/ /,/g'`
echo $machines
}
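Persisting the generated MACs in $WORK_DIR/switch_machines is what makes redeploy possible: on a REDEPLOY_HOST=true run the same addresses are read back, so the relaunched VMs keep the PXE identities Compass already knows. The round-trip in isolation:

    echo "$mac_array" > $WORK_DIR/switch_machines      # first deploy: persist
    mac_array=`cat $WORK_DIR/switch_machines`          # redeploy: restore
    machines=`echo $mac_array | sed 's/ /,/g'`         # space- to comma-separated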
diff --git a/deploy/launch.sh b/deploy/launch.sh
index bad03cf7..b9e47693 100755
--- a/deploy/launch.sh
+++ b/deploy/launch.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
#set -x
WORK_DIR=$COMPASS_DIR/work/deploy
@@ -18,49 +19,56 @@ source ${COMPASS_DIR}/deploy/compass_vm.sh
source ${COMPASS_DIR}/deploy/deploy_host.sh
######################### main process
-if true
-then
-if ! prepare_env;then
- echo "prepare_env failed"
- exit 1
+print_logo
+
+if [[ ! -z $VIRT_NUMBER ]]; then
+ tear_down_machines
fi
log_info "########## get host mac begin #############"
machines=`get_host_macs`
-if [[ -z $machines ]];then
+if [[ -z $machines ]]; then
log_error "get_host_macs failed"
exit 1
fi
-log_info "deploy host macs: $machines"
export machines
-log_info "########## set up network begin #############"
-if ! create_nets;then
- log_error "create_nets failed"
- exit 1
-fi
+if [[ "$DEPLOY_COMPASS" == "true" ]]; then
+ if ! prepare_env;then
+ echo "prepare_env failed"
+ exit 1
+ fi
-if ! launch_compass;then
- log_error "launch_compass failed"
- exit 1
-fi
-else
-# test code
-export machines="'00:00:3d:a4:ee:4c','00:00:63:35:3c:2b','00:00:f2:f2:b7:a5','00:00:2f:d3:88:28','00:00:46:67:11:e7'"
-fi
-if [[ ! -z $VIRT_NUMBER ]];then
- if ! launch_host_vms;then
- log_error "launch_host_vms failed"
+ log_info "########## set up network begin #############"
+ if ! create_nets;then
+ log_error "create_nets failed"
+ exit 1
+ fi
+
+ if ! launch_compass;then
+ log_error "launch_compass failed"
exit 1
fi
fi
-if ! deploy_host;then
- #tear_down_machines
- #tear_down_compass
- exit 1
-else
- #tear_down_machines
- #tear_down_compass
- exit 0
+
+if [[ -z "$REDEPLOY_HOST" || "$REDEPLOY_HOST" == "false" ]]; then
+ if ! set_compass_machine; then
+        log_error "set_compass_machine failed"
+ fi
fi
+
+if [[ "$DEPLOY_HOST" == "true" || $REDEPLOY_HOST == "true" ]]; then
+    if [[ ! -z $VIRT_NUMBER ]]; then
+ if ! launch_host_vms;then
+ log_error "launch_host_vms failed"
+ exit 1
+ fi
+ fi
+
+ if ! deploy_host;then
+ exit 1
+ fi
+fi
+
+figlet -ctf slant Installation Complete!
diff --git a/deploy/network.sh b/deploy/network.sh
index ac0a73e2..9f1a7b90 100755
--- a/deploy/network.sh
+++ b/deploy/network.sh
@@ -1,3 +1,8 @@
+#!/bin/bash
+function clear_forward_reject_rules()
+{
+    while sudo iptables -nL FORWARD --line-number \
+        | grep -E 'REJECT +all +-- +0\.0\.0\.0/0 +0\.0\.0\.0/0 +reject-with icmp-port-unreachable' \
+        | head -1 | awk '{print $1}' \
+        | xargs sudo iptables -D FORWARD; do :; done
+}
function setup_bridge_net()
{
@@ -16,6 +21,44 @@ function setup_bridge_net()
sudo virsh net-start $net_name
}
+function save_network_info()
+{
+ sudo ovs-vsctl list-br |grep br-external
+ br_exist=$?
+ external_nic=`ip route |grep '^default'|awk '{print $NF}'`
+ route_info=`ip route |grep -Eo '^default via [^ ]+'`
+ ip_info=`ip addr show $external_nic|grep -Eo '[^ ]+ brd [^ ]+ '`
+ if [ $br_exist -eq 0 ]; then
+ if [ "$external_nic" != "br-external" ]; then
+ sudo ovs-vsctl --may-exist add-port br-external $external_nic
+ sudo ip addr flush $external_nic
+ sudo ip addr add $ip_info dev br-external
+ sudo ip route add $route_info dev br-external
+ fi
+ else
+ sudo ovs-vsctl add-br br-external
+ sudo ovs-vsctl add-port br-external $external_nic
+ sudo ip addr flush $external_nic
+ sudo ip addr add $ip_info dev br-external
+ sudo ip route add $route_info dev br-external
+ fi
+}
+
+function setup_bridge_external()
+{
+ sudo virsh net-destroy external
+ sudo virsh net-undefine external
+
+ save_network_info
+ sed -e "s/REPLACE_NAME/external/g" \
+ -e "s/REPLACE_OVS/br-external/g" \
+ $COMPASS_DIR/deploy/template/network/bridge_ovs.xml \
+ > $WORK_DIR/network/external.xml
+
+ sudo virsh net-define $WORK_DIR/network/external.xml
+ sudo virsh net-start external
+}
+
function setup_nat_net() {
net_name=$1
gw=$2
@@ -50,5 +93,6 @@ function create_nets() {
fi
# create external network
- setup_bridge_net external $EXTERNAL_NIC
+ setup_bridge_external
+    clear_forward_reject_rules
}
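clear_forward_reject_rules deletes libvirt's default REJECT rules one at a time because iptables renumbers the FORWARD chain after every deletion; collecting all rule numbers up front and deleting them in one pass would hit the wrong rules. The loop terminates once grep finds no match, since xargs then runs iptables -D FORWARD with no rule number and the command fails. A quick manual check after create_nets, using only commands already introduced above:

    sudo iptables -nL FORWARD --line-number | grep REJECT   # ideally prints nothing
    sudo ovs-vsctl show                                     # br-external with the old default NIC as a port
    ip addr show br-external                                # host IP/route now live on the bridge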
diff --git a/deploy/prepare.sh b/deploy/prepare.sh
index ce8b7aca..ee27db1b 100755
--- a/deploy/prepare.sh
+++ b/deploy/prepare.sh
@@ -1,3 +1,15 @@
+#!/bin/bash
+function print_logo()
+{
+    if ! apt list --installed 2>/dev/null | grep -q figlet
+ then
+ sudo apt-get update -y
+ sudo apt-get install -y --force-yes figlet
+ fi
+
+ figlet -ctf slant Compass Installer
+ set +x; sleep 2; set -x
+}
function download_iso()
{
@@ -15,11 +27,10 @@ function download_iso()
curl --connect-timeout 10 -o $WORK_DIR/cache/$iso_name $ISO_URL
}
-
function prepare_env() {
export PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages
sudo apt-get update -y
- sudo apt-get install -y --force-yes mkisofs bc curl ipmitool
+ sudo apt-get install -y --force-yes mkisofs bc curl ipmitool openvswitch-switch
sudo apt-get install -y --force-yes git python-pip python-dev
sudo apt-get install -y --force-yes libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev
sudo pip install --upgrade pip
diff --git a/deploy/restful.py b/deploy/restful.py
index 4d86da82..44e461c4 100644
--- a/deploy/restful.py
+++ b/deploy/restful.py
@@ -647,6 +647,11 @@ class Client(object):
data['deploy'] = deploy
return self._post('/clusters/%s/action' % cluster_id, data=data)
+ def redeploy_cluster(self, cluster_id):
+ data = {}
+ data['redeploy'] = {}
+ return self._post('/clusters/%s/action' % cluster_id, data=data)
+
def get_cluster_state(self, cluster_id):
return self._get('/clusters/%s/state' % cluster_id)
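redeploy_cluster posts an empty redeploy object to the same per-cluster action endpoint that deploy uses. The equivalent raw request, assuming the usual /api URL prefix, an already-authenticated session (the client handles login itself), and cluster id 1 as an illustrative value:

    curl -X POST "http://$COMPASS_SERVER/api/clusters/1/action" \
         -H "Content-Type: application/json" \
         -d '{"redeploy": {}}'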
diff --git a/deploy/template/network/bridge_ovs.xml b/deploy/template/network/bridge_ovs.xml
new file mode 100644
index 00000000..cf01a5d9
--- /dev/null
+++ b/deploy/template/network/bridge_ovs.xml
@@ -0,0 +1,6 @@
+ <network ipv6='no'>
+ <name>REPLACE_NAME</name>
+ <bridge name='REPLACE_OVS'/>
+ <forward mode='bridge'/>
+ <virtualport type='openvswitch'/>
+ </network>
diff --git a/deploy_compass.sh b/deploy_compass.sh
new file mode 100755
index 00000000..7d20d03a
--- /dev/null
+++ b/deploy_compass.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+export DEPLOY_COMPASS=${DEPLOY_COMPASS-"true"}
+
+./deploy.sh "$@"
diff --git a/deploy_host.sh b/deploy_host.sh
new file mode 100755
index 00000000..f5b6e786
--- /dev/null
+++ b/deploy_host.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+export DEPLOY_HOST=${DEPLOY_HOST-"true"}
+
+./deploy.sh "$@"
diff --git a/redeploy_host.sh b/redeploy_host.sh
new file mode 100755
index 00000000..8a86ff14
--- /dev/null
+++ b/redeploy_host.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+export REDEPLOY_HOST=${REDEPLOY_HOST-"true"}
+
+./deploy.sh "$@"
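The three top-level wrappers differ only in which flag they export before delegating to deploy.sh, so they compose naturally. A full virtual run from scratch, for example:

    ./deploy_compass.sh    # DEPLOY_COMPASS=true  -> build and launch the Compass VM
    ./deploy_host.sh       # DEPLOY_HOST=true     -> PXE-boot and deploy the hosts
    ./redeploy_host.sh     # REDEPLOY_HOST=true   -> reuse cached MACs and redeploy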
diff --git a/util/isolinux.cfg b/util/isolinux.cfg
index ca612f77..b4f637da 100644
--- a/util/isolinux.cfg
+++ b/util/isolinux.cfg
@@ -1,40 +1,120 @@
default vesamenu.c32
-#prompt 1
-timeout 15
+timeout 1
display boot.msg
-menu background splash.jpg
-menu title Welcome to CentOS 6.5!
-menu color border 0 #ffffffff #00000000
-menu color sel 7 #ffffffff #ff000000
-menu color title 0 #ffffffff #00000000
-menu color tabmsg 0 #ffffffff #00000000
-menu color unsel 0 #ffffffff #00000000
-menu color hotsel 0 #ff000000 #ffffffff
-menu color hotkey 7 #ffffffff #ff000000
-menu color scrollbar 0 #ffffffff #00000000
+# Clear the screen when exiting the menu, instead of leaving the menu displayed.
+# For vesamenu, this means the graphical background is still displayed without
+# the menu itself for as long as the screen remains in graphics mode.
+menu clear
+menu background splash.png
+menu title CentOS 7
+menu vshift 8
+menu rows 18
+menu margin 8
+#menu hidden
+menu helpmsgrow 15
+menu tabmsgrow 13
+
+# Border Area
+menu color border * #00000000 #00000000 none
+
+# Selected item
+menu color sel 0 #ffffffff #00000000 none
+
+# Title bar
+menu color title 0 #ff7ba3d0 #00000000 none
+
+# Press [Tab] message
+menu color tabmsg 0 #ff3a6496 #00000000 none
+
+# Unselected menu item
+menu color unsel 0 #84b8ffff #00000000 none
+
+# Selected hotkey
+menu color hotsel 0 #84b8ffff #00000000 none
+
+# Unselected hotkey
+menu color hotkey 0 #ffffffff #00000000 none
+
+# Help text
+menu color help 0 #ffffffff #00000000 none
+
+# A scrollbar of some type? Not sure.
+menu color scrollbar 0 #ffffffff #ff355594 none
+
+# Timeout msg
+menu color timeout 0 #ffffffff #00000000 none
+menu color timeout_msg 0 #ffffffff #00000000 none
+
+# Command prompt text
+menu color cmdmark 0 #84b8ffff #00000000 none
+menu color cmdline 0 #ffffffff #00000000 none
+
+# With "menu hidden" enabled (it is commented out above), the menu would not be
+# displayed unless the user presses a key; only a timeout message would show.
+
+menu tabmsg Press Tab for full configuration options on menu items.
+
+menu separator # insert an empty line
+menu separator # insert an empty line
label linux
- menu label ^Install or upgrade an existing system
+ menu label ^Install CentOS 7
menu default
kernel vmlinuz
append ks=cdrom:/isolinux/ks.cfg initrd=initrd.img mgmt_ip=REPLACE_MGMT_IP mgmt_netmask=REPLACE_MGMT_NETMASK install_ip=REPLACE_INSTALL_IP install_netmask=REPLACE_INSTALL_NETMASK gw=REPLACE_GW external_ip=REPLACE_COMPASS_EXTERNAL_IP external_mask=REPLACE_COMPASS_EXTERNAL_NETMASK external_gw=REPLACE_COMPASS_EXTERNAL_GW dns1=REPLACE_COMPASS_DNS1 dns2=REPLACE_COMPASS_DNS2
+label check
+ menu label Test this ^media & install CentOS 7
+ kernel vmlinuz
+ append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rd.live.check quiet
+
+menu separator # insert an empty line
+
+# utilities submenu
+menu begin ^Troubleshooting
+ menu title Troubleshooting
label vesa
- menu label Install system with ^basic video driver
+ menu indent count 5
+ menu label Install CentOS 7 in ^basic graphics mode
+ text help
+ Try this option out if you're having trouble installing
+ CentOS 7.
+ endtext
kernel vmlinuz
- append initrd=initrd.img xdriver=vesa nomodeset
+ append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 xdriver=vesa nomodeset quiet
+
label rescue
- menu label ^Rescue installed system
+ menu indent count 5
+ menu label ^Rescue a CentOS system
+ text help
+ If the system will not boot, this lets you access files
+ and edit config files to try to get it booting again.
+ endtext
kernel vmlinuz
- append initrd=initrd.img rescue
+ append initrd=initrd.img inst.stage2=hd:LABEL=CentOS\x207\x20x86_64 rescue quiet
+
+label memtest
+ menu label Run a ^memory test
+ text help
+ If your system is having issues, a problem with your
+ system's memory may be the cause. Use this utility to
+ see if the memory is working correctly.
+ endtext
+ kernel memtest
+
+menu separator # insert an empty line
+
label local
menu label Boot from ^local drive
localboot 0xffff
-label memtest86
- menu label ^Memory test
- kernel memtest
- append -
+menu separator # insert an empty line
+menu separator # insert an empty line
+
+label returntomain
+ menu label Return to ^main menu
+ menu exit
+
+menu end
diff --git a/util/ks.cfg b/util/ks.cfg
index 9e33f42f..dd31d8fd 100644
--- a/util/ks.cfg
+++ b/util/ks.cfg
@@ -1,27 +1,70 @@
+#version=DEVEL
+# System authorization information
+auth --useshadow --enablemd5
+# Install OS instead of upgrade
install
+# License agreement
+eula --agreed
+# Use text mode install
text
-cdrom
-reboot --eject
+# Firewall configuration
+firewall --disabled
+firstboot --disable
+# Keyboard layouts
+# old format: keyboard us
+# new format:
+keyboard --vckeymap=us --xlayouts='us'
+network --onboot no --device eth0 --bootproto dhcp --noipv6
+network --onboot no --device eth1 --bootproto dhcp --noipv6
+network --onboot no --device eth2 --bootproto dhcp --noipv6
+# System language
lang en_US.UTF-8
-keyboard us
+# Installation logging level
+logging --level=info
+# Reboot after installation
+reboot
+# Root password
rootpw root
-timezone --utc Etc/UTC
-firewall --disabled
+# SELinux configuration
selinux --disabled
-unsupported_hardware
+# System services
+services --enabled="NetworkManager,sshd"
+# Do not configure the X Window System
skipx
+# System timezone
+timezone America/Los_Angeles --isUtc
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=sda
+# Clear the Master Boot Record
+zerombr
+# Partition clearing information
+clearpart --all --initlabel
+# Disk partitioning information
+part swap --asprimary --fstype="swap" --ondisk=sda --size=3072
+part /boot --asprimary --fstype="ext3" --ondisk=sda --size=500
+part pv.64 --fstype="lvmpv" --ondisk=sda --size=27145
+volgroup os --pesize=4096 pv.64
+logvol /var --fstype="ext3" --grow --percent=40 --name=varvol --vgname=os
+logvol /tmp --fstype="ext3" --grow --percent=5 --name=tmpvol --vgname=os
+logvol / --fstype="ext3" --grow --percent=50 --name=rootvol --vgname=os
+logvol /home --fstype="ext3" --grow --percent=5 --name=homevol --vgname=os
-# NEVER ever place zerombr here, it breaks automated installation
-%include /tmp/bootloader.ks
-%include /tmp/partition.ks
-
-# PREINSTALL SECTION
-# HERE ARE COMMANDS THAT WILL BE LAUNCHED BEFORE
-# INSTALLATION PROCESS ITSELF
%pre
#!/bin/sh
-# hard drives
+set -x -v
+exec 1>/tmp/ks-pre.log 2>&1
+# Once root's homedir is there, copy over the log.
+while : ; do
+ sleep 10
+ if [ -d /mnt/sysimage/root ]; then
+ cp /tmp/ks-pre.log /mnt/sysimage/root/
+ logger "Copied %pre section log to system"
+ break
+ fi
+done &
+
+# hard drives
drives=(`ls -1 /sys/block | grep "sd\|hd\|vd\|cciss"`)
default_drive=${drives[0]}
@@ -94,60 +137,30 @@ echo "logvol /var/log --vgname=os --size=4096 --percent 40 --grow --name=varlog
echo "bootloader --location=mbr --driveorder=${tgtdrive} --append=' biosdevname=0 crashkernel=none'" > /tmp/bootloader.ks
%end
-%packages --nobase --excludedocs
-wget
-git
-yum
-cobbler
-cobbler-web
-createrepo
-mkisofs
-syslinux
-debmirror
-pykickstart
-cman
-bind
-rsync
-dhcp
-xinetd
-tftp-server
-gcc
-httpd
-libselinux-python
-python-setuptools
-python-devel
-mysql-devel
-mysql-server
-mysql
-MySQL-python
-redis
-mod_wsgi
-rabbitmq-server
-nfs-utils
-
%post
echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6\nmodprobe nf_conntrack_tftp\nmodprobe nf_nat_tftp" >> /etc/rc.modules
chmod +x /etc/rc.modules
echo -e "net.nf_conntrack_max=1048576" >> /etc/sysctl.conf
mkdir -p /var/log/coredump
echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" >> /etc/sysctl.conf
+
chmod 777 /var/log/coredump
echo -e "* soft core unlimited\n* hard core unlimited" >> /etc/security/limits.conf
+%end
-########################### post install before chroot ####################
-%post --nochroot --log=/mnt/sysimage/root/anaconda-post-before-chroot.log
+%post --nochroot --log=/root/anaconda-post-before-chroot.log
#!/bin/sh
set -x
SOURCE="/mnt/sysimage/tmp/source"
for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
mkdir -p "${SOURCE}"
-mount -o bind "/mnt/source" "${SOURCE}"
+mount -o bind "/mnt/install/source" "${SOURCE}"
+%end
-########################### post install after chroot ####################
-%post --log=/root/anaconda-post-after-chroot.log
-#!/bin/bash
-set -x
+%post --log=/var/log/post_install.log
+set -x -v
+exec 1>/root/ks-post.log 2>&1
function save_nic_cfg() {
scrFile="/etc/sysconfig/network-scripts/ifcfg-$1"
@@ -197,31 +210,29 @@ ex_gw=$external_gw
install_ip=$install_ip
install_netmask=$install_netmask
install_intf="eth1"
-install_hwaddr=`ifconfig $install_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
+#install_hwaddr=`ifconfig $install_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
save_nic_cfg $install_intf $install_ip $install_netmask $install_hwaddr
mgmt_ip=$mgmt_ip
mgmt_netmask=$mgmt_netmask
mgmt_intf="eth0"
-mgmt_hwaddr=`ifconfig $mgmt_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
+#mgmt_hwaddr=`ifconfig $mgmt_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
save_nic_cfg $mgmt_intf $mgmt_ip $mgmt_netmask $mgmt_hwaddr
external_ip=$external_ip
external_netmask=$external_mask
external_intf="eth2"
-external_hwaddr=`ifconfig $external_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
+#external_hwaddr=`ifconfig $external_intf | grep -i hwaddr | sed -e 's#^.*hwaddr[[:space:]]*##I'`
save_nic_cfg $external_intf $external_ip $external_netmask $external_hwaddr
save_gateway
-# already chroot
SOURCE=/tmp/source
FS=/tmp/fs
-echo
mkdir -p ${SOURCE}
mkdir -p ${FS}
@@ -229,7 +240,6 @@ repodir="/var/lib/install_iso"
# Copying Centos files
mkdir -p ${repodir}
-cp -r ${SOURCE} ${repodir}/source
cp -rf ${SOURCE} ${repodir}/source
rm -rf ${repodir}/source/bootstrap/ \
@@ -247,9 +257,6 @@ cp -rf ${SOURCE}/repos/cobbler/* /opt/cobbler/
cp -rf ${SOURCE}/ansible/* /opt
cp -rf ${SOURCE}/repos/* /opt
cp -rf ${SOURCE}/loaders.tar.gz /opt
-
-mkdir -p /var/www
-
cp -rf ${SOURCE}/pip /var/www/pip
cp -rf ${SOURCE}/guestimg /var/www/guestimg
@@ -270,5 +277,9 @@ sed -i 's/Defaults requiretty/#Defaults requiretty/g' /etc/sudoers
sed -i 's/^UseDNS/#UseDNS/g' /etc/ssh/sshd_config
echo "UseDNS no" >> /etc/ssh/sshd_config
+# End post_install_network_config generated code
%end
+%packages --nobase
+@core
+%end
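The logvol percentages above are applied together with --grow, i.e. roughly as shares of the free space in volume group "os" (built on the single 27145 MiB pv.64, after the fixed swap and /boot partitions). Ignoring LVM metadata overhead, that works out to approximately:

    /      50%  ->  ~13572 MiB
    /var   40%  ->  ~10858 MiB
    /tmp    5%  ->   ~1357 MiB
    /home   5%  ->   ~1357 MiB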