From de3139313cb7e3087e3328d1b37cdc88510ca5db Mon Sep 17 00:00:00 2001 From: Weidong Shao Date: Mon, 19 Oct 2015 15:04:48 +0000 Subject: Remove Compass from genesis. Compass4nfv has its own repo. Change-Id: I4b1b778682e5b49192ab00f5319594c66640a432 Signed-off-by: Weidong Shao --- compass/build/Makefile | 117 -- compass/build/cache.mk | 0 compass/build/config.mk | 0 compass/build/install.sh | 27 - compass/ci/build.sh | 394 ------ compass/ci/deploy.sh | 9 - compass/ci/launch.sh | 65 - compass/ci/log.sh | 22 - .../openstack_juno/HA-ansible-multinodes.yml | 42 - compass/deploy/ansible/openstack_juno/allinone.yml | 38 - compass/deploy/ansible/openstack_juno/compute.yml | 9 - .../deploy/ansible/openstack_juno/controller.yml | 15 - .../deploy/ansible/openstack_juno/group_vars/all | 54 - .../deploy/ansible/openstack_juno/multinodes.yml | 75 -- compass/deploy/ansible/openstack_juno/network.yml | 8 - .../ansible/openstack_juno/single-controller.yml | 38 - compass/deploy/ansible/openstack_juno/storage.yml | 8 - .../roles/cinder-controller/handlers/main.yml | 6 - .../cinder-controller/tasks/cinder_config.yml | 20 - .../cinder-controller/tasks/cinder_install.yml | 20 - .../ansible/roles/cinder-controller/tasks/main.yml | 13 - .../cinder-controller/templates/api-paste.ini | 71 -- .../roles/cinder-controller/templates/cinder.conf | 63 - .../cinder-controller/templates/cinder_init.sh | 6 - .../ansible/roles/cinder-volume/files/loop.yml | 1 - .../ansible/roles/cinder-volume/handlers/main.yml | 3 - .../ansible/roles/cinder-volume/tasks/main.yml | 55 - .../roles/cinder-volume/templates/cinder.conf | 62 - .../files/sources.list.d/cloudarchive-juno.list | 1 - compass/deploy/ansible/roles/common/tasks/main.yml | 28 - .../deploy/ansible/roles/common/templates/hosts | 22 - .../deploy/ansible/roles/common/templates/ntp.conf | 56 - .../deploy/ansible/roles/dashboard/tasks/main.yml | 30 - .../roles/dashboard/templates/local_settings.py | 511 -------- .../dashboard/templates/openstack-dashboard.conf | 14 - compass/deploy/ansible/roles/database/files/my.cnf | 131 -- .../deploy/ansible/roles/database/tasks/main.yml | 12 - .../ansible/roles/database/tasks/mariadb.yml | 46 - .../deploy/ansible/roles/database/tasks/mysql.yml | 22 - .../ansible/roles/database/templates/data.j2 | 39 - .../deploy/ansible/roles/database/templates/my.cnf | 134 -- .../ansible/roles/database/templates/wsrep.cnf | 126 -- .../deploy/ansible/roles/glance/handlers/main.yml | 6 - .../ansible/roles/glance/tasks/glance_config.yml | 29 - .../ansible/roles/glance/tasks/glance_install.yml | 26 - compass/deploy/ansible/roles/glance/tasks/main.yml | 18 - compass/deploy/ansible/roles/glance/tasks/nfs.yml | 41 - .../ansible/roles/glance/templates/glance-api.conf | 677 ---------- .../roles/glance/templates/glance-registry.conf | 190 --- .../ansible/roles/glance/templates/image_upload.sh | 2 - compass/deploy/ansible/roles/ha/files/galera_chk | 10 - compass/deploy/ansible/roles/ha/files/mysqlchk | 15 - compass/deploy/ansible/roles/ha/files/notify.sh | 4 - compass/deploy/ansible/roles/ha/handlers/main.yml | 9 - compass/deploy/ansible/roles/ha/tasks/main.yml | 94 -- .../deploy/ansible/roles/ha/templates/failover.j2 | 65 - .../deploy/ansible/roles/ha/templates/haproxy.cfg | 133 -- .../ansible/roles/ha/templates/keepalived.conf | 42 - .../roles/keystone/tasks/keystone_config.yml | 16 - .../roles/keystone/tasks/keystone_install.yml | 29 - .../deploy/ansible/roles/keystone/tasks/main.yml | 13 - .../roles/keystone/templates/admin-openrc.sh | 6 - 
.../roles/keystone/templates/demo-openrc.sh | 5 - .../ansible/roles/keystone/templates/keystone.conf | 1317 -------------------- .../ansible/roles/keystone/templates/keystone_init | 43 - .../ansible/roles/monitor/files/check_service.sh | 7 - compass/deploy/ansible/roles/monitor/files/root | 1 - .../deploy/ansible/roles/monitor/tasks/main.yml | 11 - compass/deploy/ansible/roles/mq/tasks/main.yml | 5 - compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml | 45 - .../ansible/roles/mq/tasks/rabbitmq_cluster.yml | 27 - .../ansible/roles/mq/templates/.erlang.cookie | 1 - .../ansible/roles/mq/templates/rabbitmq-env.conf | 1 - .../ansible/roles/neutron-common/handlers/main.yml | 13 - .../roles/neutron-compute/defaults/main.yml | 2 - .../roles/neutron-compute/handlers/main.yml | 13 - .../ansible/roles/neutron-compute/tasks/main.yml | 55 - .../roles/neutron-compute/templates/dhcp_agent.ini | 90 -- .../neutron-compute/templates/dnsmasq-neutron.conf | 2 - .../neutron-compute/templates/etc/xorp/config.boot | 25 - .../roles/neutron-compute/templates/l3_agent.ini | 81 -- .../neutron-compute/templates/metadata_agent.ini | 46 - .../roles/neutron-compute/templates/ml2_conf.ini | 108 -- .../neutron-compute/templates/neutron-network.conf | 465 ------- .../roles/neutron-compute/templates/neutron.conf | 466 ------- .../neutron-compute/templates/neutron_init.sh | 4 - .../roles/neutron-compute/templates/nova.conf | 73 -- .../roles/neutron-controller/handlers/main.yml | 24 - .../roles/neutron-controller/tasks/main.yml | 13 - .../neutron-controller/tasks/neutron_config.yml | 10 - .../neutron-controller/tasks/neutron_install.yml | 29 - .../neutron-controller/templates/dhcp_agent.ini | 90 -- .../templates/dnsmasq-neutron.conf | 2 - .../templates/etc/xorp/config.boot | 25 - .../neutron-controller/templates/l3_agent.ini | 81 -- .../templates/metadata_agent.ini | 46 - .../neutron-controller/templates/ml2_conf.ini | 108 -- .../templates/neutron-network.conf | 465 ------- .../neutron-controller/templates/neutron.conf | 466 ------- .../neutron-controller/templates/neutron_init.sh | 4 - .../roles/neutron-controller/templates/nova.conf | 69 - .../roles/neutron-network/handlers/main.yml | 21 - .../roles/neutron-network/tasks/igmp-router.yml | 20 - .../ansible/roles/neutron-network/tasks/main.yml | 114 -- .../ansible/roles/neutron-network/tasks/odl.yml | 13 - .../roles/neutron-network/templates/dhcp_agent.ini | 90 -- .../neutron-network/templates/dnsmasq-neutron.conf | 2 - .../neutron-network/templates/etc/xorp/config.boot | 25 - .../roles/neutron-network/templates/l3_agent.ini | 81 -- .../neutron-network/templates/metadata_agent.ini | 46 - .../roles/neutron-network/templates/ml2_conf.ini | 108 -- .../neutron-network/templates/neutron-network.conf | 465 ------- .../roles/neutron-network/templates/neutron.conf | 466 ------- .../neutron-network/templates/neutron_init.sh | 4 - .../roles/neutron-network/templates/nova.conf | 69 - .../ansible/roles/nova-compute/handlers/main.yml | 3 - .../ansible/roles/nova-compute/tasks/main.yml | 21 - .../roles/nova-compute/templates/nova-compute.conf | 7 - .../ansible/roles/nova-compute/templates/nova.conf | 73 -- .../roles/nova-controller/handlers/main.yml | 24 - .../ansible/roles/nova-controller/tasks/main.yml | 13 - .../roles/nova-controller/tasks/nova_config.yml | 16 - .../roles/nova-controller/tasks/nova_install.yml | 35 - .../roles/nova-controller/templates/dhcp_agent.ini | 90 -- .../nova-controller/templates/dnsmasq-neutron.conf | 2 - .../nova-controller/templates/etc/xorp/config.boot | 25 - 
.../roles/nova-controller/templates/l3_agent.ini | 81 -- .../nova-controller/templates/metadata_agent.ini | 46 - .../roles/nova-controller/templates/ml2_conf.ini | 108 -- .../nova-controller/templates/neutron-network.conf | 465 ------- .../roles/nova-controller/templates/neutron.conf | 466 ------- .../nova-controller/templates/neutron_init.sh | 4 - .../roles/nova-controller/templates/nova.conf | 72 -- compass/deploy/ansible/roles/repo/tasks/main.yml | 6 - .../ansible/roles/repo/templates/sources.list | 1 - compass/deploy/compass_vm.sh | 103 -- compass/deploy/conf/baremetal.conf | 20 - compass/deploy/conf/base.conf | 78 -- compass/deploy/conf/cluster.conf | 20 - compass/deploy/conf/five.conf | 19 - compass/deploy/deploy-vm.sh | 52 - compass/deploy/deploy_host.sh | 40 - compass/deploy/func.sh | 23 - compass/deploy/host_baremetal.sh | 9 - compass/deploy/host_vm.sh | 59 - compass/deploy/mac_generator.sh | 23 - compass/deploy/network.sh | 70 -- compass/deploy/prepare.sh | 35 - compass/deploy/remote_excute.exp | 23 - compass/deploy/setup-env.sh | 61 - compass/deploy/status_callback.py | 174 --- compass/deploy/template/network/bridge.xml | 5 - compass/deploy/template/network/nat.xml | 10 - compass/deploy/template/vm/compass.xml | 64 - compass/deploy/template/vm/host.xml | 67 - 155 files changed, 12213 deletions(-) delete mode 100755 compass/build/Makefile delete mode 100755 compass/build/cache.mk delete mode 100755 compass/build/config.mk delete mode 100755 compass/build/install.sh delete mode 100755 compass/ci/build.sh delete mode 100755 compass/ci/deploy.sh delete mode 100755 compass/ci/launch.sh delete mode 100755 compass/ci/log.sh delete mode 100644 compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml delete mode 100644 compass/deploy/ansible/openstack_juno/allinone.yml delete mode 100644 compass/deploy/ansible/openstack_juno/compute.yml delete mode 100644 compass/deploy/ansible/openstack_juno/controller.yml delete mode 100644 compass/deploy/ansible/openstack_juno/group_vars/all delete mode 100644 compass/deploy/ansible/openstack_juno/multinodes.yml delete mode 100644 compass/deploy/ansible/openstack_juno/network.yml delete mode 100644 compass/deploy/ansible/openstack_juno/single-controller.yml delete mode 100644 compass/deploy/ansible/openstack_juno/storage.yml delete mode 100644 compass/deploy/ansible/roles/cinder-controller/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml delete mode 100644 compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml delete mode 100644 compass/deploy/ansible/roles/cinder-controller/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini delete mode 100644 compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf delete mode 100644 compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh delete mode 100644 compass/deploy/ansible/roles/cinder-volume/files/loop.yml delete mode 100644 compass/deploy/ansible/roles/cinder-volume/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/cinder-volume/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf delete mode 100644 compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list delete mode 100644 compass/deploy/ansible/roles/common/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/common/templates/hosts delete mode 100644 
compass/deploy/ansible/roles/common/templates/ntp.conf delete mode 100644 compass/deploy/ansible/roles/dashboard/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/dashboard/templates/local_settings.py delete mode 100644 compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf delete mode 100644 compass/deploy/ansible/roles/database/files/my.cnf delete mode 100644 compass/deploy/ansible/roles/database/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/database/tasks/mariadb.yml delete mode 100644 compass/deploy/ansible/roles/database/tasks/mysql.yml delete mode 100644 compass/deploy/ansible/roles/database/templates/data.j2 delete mode 100644 compass/deploy/ansible/roles/database/templates/my.cnf delete mode 100644 compass/deploy/ansible/roles/database/templates/wsrep.cnf delete mode 100644 compass/deploy/ansible/roles/glance/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/glance/tasks/glance_config.yml delete mode 100644 compass/deploy/ansible/roles/glance/tasks/glance_install.yml delete mode 100644 compass/deploy/ansible/roles/glance/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/glance/tasks/nfs.yml delete mode 100644 compass/deploy/ansible/roles/glance/templates/glance-api.conf delete mode 100644 compass/deploy/ansible/roles/glance/templates/glance-registry.conf delete mode 100644 compass/deploy/ansible/roles/glance/templates/image_upload.sh delete mode 100644 compass/deploy/ansible/roles/ha/files/galera_chk delete mode 100644 compass/deploy/ansible/roles/ha/files/mysqlchk delete mode 100644 compass/deploy/ansible/roles/ha/files/notify.sh delete mode 100644 compass/deploy/ansible/roles/ha/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/ha/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/ha/templates/failover.j2 delete mode 100644 compass/deploy/ansible/roles/ha/templates/haproxy.cfg delete mode 100644 compass/deploy/ansible/roles/ha/templates/keepalived.conf delete mode 100644 compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml delete mode 100644 compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml delete mode 100644 compass/deploy/ansible/roles/keystone/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh delete mode 100644 compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh delete mode 100644 compass/deploy/ansible/roles/keystone/templates/keystone.conf delete mode 100644 compass/deploy/ansible/roles/keystone/templates/keystone_init delete mode 100644 compass/deploy/ansible/roles/monitor/files/check_service.sh delete mode 100644 compass/deploy/ansible/roles/monitor/files/root delete mode 100644 compass/deploy/ansible/roles/monitor/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/mq/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml delete mode 100644 compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml delete mode 100644 compass/deploy/ansible/roles/mq/templates/.erlang.cookie delete mode 100644 compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf delete mode 100644 compass/deploy/ansible/roles/neutron-common/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-compute/defaults/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-compute/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-compute/tasks/main.yml delete mode 100644 
compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh delete mode 100644 compass/deploy/ansible/roles/neutron-compute/templates/nova.conf delete mode 100644 compass/deploy/ansible/roles/neutron-controller/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-controller/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml delete mode 100644 compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh delete mode 100644 compass/deploy/ansible/roles/neutron-controller/templates/nova.conf delete mode 100644 compass/deploy/ansible/roles/neutron-network/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml delete mode 100644 compass/deploy/ansible/roles/neutron-network/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/neutron-network/tasks/odl.yml delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/neutron.conf delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh delete mode 100644 compass/deploy/ansible/roles/neutron-network/templates/nova.conf delete mode 100644 compass/deploy/ansible/roles/nova-compute/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/nova-compute/tasks/main.yml 
delete mode 100644 compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf delete mode 100644 compass/deploy/ansible/roles/nova-compute/templates/nova.conf delete mode 100644 compass/deploy/ansible/roles/nova-controller/handlers/main.yml delete mode 100644 compass/deploy/ansible/roles/nova-controller/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml delete mode 100644 compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/neutron.conf delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh delete mode 100644 compass/deploy/ansible/roles/nova-controller/templates/nova.conf delete mode 100644 compass/deploy/ansible/roles/repo/tasks/main.yml delete mode 100644 compass/deploy/ansible/roles/repo/templates/sources.list delete mode 100644 compass/deploy/compass_vm.sh delete mode 100644 compass/deploy/conf/baremetal.conf delete mode 100644 compass/deploy/conf/base.conf delete mode 100644 compass/deploy/conf/cluster.conf delete mode 100644 compass/deploy/conf/five.conf delete mode 100644 compass/deploy/deploy-vm.sh delete mode 100644 compass/deploy/deploy_host.sh delete mode 100755 compass/deploy/func.sh delete mode 100644 compass/deploy/host_baremetal.sh delete mode 100644 compass/deploy/host_vm.sh delete mode 100755 compass/deploy/mac_generator.sh delete mode 100755 compass/deploy/network.sh delete mode 100644 compass/deploy/prepare.sh delete mode 100644 compass/deploy/remote_excute.exp delete mode 100644 compass/deploy/setup-env.sh delete mode 100644 compass/deploy/status_callback.py delete mode 100644 compass/deploy/template/network/bridge.xml delete mode 100644 compass/deploy/template/network/nat.xml delete mode 100644 compass/deploy/template/vm/compass.xml delete mode 100644 compass/deploy/template/vm/host.xml diff --git a/compass/build/Makefile b/compass/build/Makefile deleted file mode 100755 index 7448dc4..0000000 --- a/compass/build/Makefile +++ /dev/null @@ -1,117 +0,0 @@ -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# dradez@redhat.com -# chigang@huawei.com -# All rights reserved. 
This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -SHELL = /bin/bash -############################################################################ -# BEGIN of variables to customize -# -#Input args -export UNIT_TEST = FALSE -export INTERACTIVE = TRUE -export ISOSRC = file:$(shell pwd)/ubuntu -export ISOCACHE = $(shell pwd)/$(shell basename $(ISOSRC)) -export PRODNO = "OPNFV_BGS" -export REVSTATE = "P0000" -export RELEASE_DIR = $(shell pwd)/release - -# Note! Invoke with "make REVSTATE=RXXXX all" to make release build! -# Invoke with ISOCACHE=/full/path/to/iso if cached ISO is in non-standard location. - -#Build variables -export BUILD_BASE := $(shell pwd) -export CACHE_DIR := $(BUILD_BASE)/cache -export INSTALL_DIR := $(BUILD_BASE) -export VERSION_FILE := $(BUILD_BASE)/.versions -export TOPDIR := $(shell pwd) - -export OLDISO_DIR := $(TOPDIR)/oldiso -export NEWISO_DIR := $(TOPDIR)/newiso -export NEWIMAGE_DIR := $(TOPDIR)/newiso/image -export NEWFILESYSTEM := $(TOPDIR)/newiso/filesystem -export MANIFEST_DIR = $(shell find $(NEWISO_DIR) -name filesystem.manifest) -export SQUASHFS_DIR = $(shell find $(NEWISO_DIR) -name filesystem.squashfs) -export FSSIZE_DIR = $(shell find $(NEWISO_DIR) -name filesystem.size) - -# -# END of variables to customize -############################################################################# - -.PHONY: all -all: iso - @echo "Versions of cached build results built by" $(shell hostname) "at" $(shell date -u) > $(VERSION_FILE) - @echo "cache.mk" $(shell md5sum $(BUILD_BASE)/cache.mk | cut -f1 -d " ") >> $(VERSION_FILE) - @echo "config.mk" $(shell md5sum $(BUILD_BASE)/config.mk | cut -f1 -d " ") >> $(VERSION_FILE) - -############################################################################ -# BEGIN of Include definitions -# -include config.mk -include cache.mk -# -# END Include definitions -############################################################################# - -.PHONY: prepare-cache -prepare-cache: - @echo "prepare-cache to be done" - -.PHONY: mount-ubuntuiso -mount-ubuntuiso: - @echo "===Mounting ubuntu ISO in $(OLDISO_DIR)" - -mkdir -p $(OLDISO_DIR) $(NEWIMAGE_DIR) - @fuseiso $(ISOCACHE)/*.iso $(OLDISO_DIR) - cp $(OLDISO_DIR)/. $(NEWIMAGE_DIR) -rp - -.PHONY: umount-ubuntuiso -umount-ubuntuiso: - @set +e - @echo "===Unmounting ubuntu ISO from $(OLDISO_DIR)" - @fusermount -u $(OLDISO_DIR) - @set -e - -.PHONY: install-package -install-package: - @echo "===uncompress file system to add new files" - @find $(NEWISO_DIR) -name "filesystem.squashfs" |xargs unsquashfs - @mv squashfs-root $(NEWFILESYSTEM) - cp -f /etc/resolv.conf $(NEWFILESYSTEM)/run/resolvconf/ - cp /etc/hosts $(NEWFILESYSTEM)/etc/ - cp $(INSTALL_DIR)/install.sh $(NEWFILESYSTEM)/ - @echo "===install package on filesystem for newiso" - #@chroot $(NEWFILESYSTEM) sh ./install.sh - @chmod +w $(MANIFEST_DIR) - @chroot $(NEWFILESYSTEM) dpkg-query -W --showformat='$${Package} $${Version}\n' | tee ${MANIFEST_DIR} - @rm $(SQUASHFS_DIR) - @mksquashfs $(NEWFILESYSTEM) $(SQUASHFS_DIR) - @chmod +w $(FSSIZE_DIR) - cd $(NEWISO_DIR); \ - (du -sx --block-size=1 $(NEWFILESYSTEM) | cut -f1 ) | tee ${FSSIZE_DIR} - cd $(NEWIMAGE_DIR); \ - find .
-type f -print0 | xargs -0 md5sum | grep -v "\./md5sum.txt" | tee ./md5sum.txt - -.PHONY: make-iso -make-iso: - @echo "===Building OPNFV iso" - cd $(NEWIMAGE_DIR); \ - mkisofs -r -V "OPNFV" -cache-inodes -J -l -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table -o ../ubuntu-14.04-amd64-opnfv.iso . - -.PHONY: build-clean -build-clean: - -rm -Rf $(OLDISO_DIR) - -rm -Rf $(NEWISO_DIR) - -rm -Rf $(RELEASE_DIR) - -.PHONY: iso -iso: build-clean mount-ubuntuiso umount-ubuntuiso install-package make-iso - -mkdir $(RELEASE_DIR) - @mv $(NEWISO_DIR)/*.iso $(RELEASE_DIR) - @printf "\n\nISO is built successfully!\n\n" diff --git a/compass/build/cache.mk b/compass/build/cache.mk deleted file mode 100755 index e69de29..0000000 diff --git a/compass/build/config.mk b/compass/build/config.mk deleted file mode 100755 index e69de29..0000000 diff --git a/compass/build/install.sh b/compass/build/install.sh deleted file mode 100755 index 4a8b893..0000000 --- a/compass/build/install.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -##################################################################################### -# Copyright (c) 2015 Huawei Technologies Co.,Ltd. -# chigang@huawei.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -##################################################################################### - -# some packages or tools may use the filesystems below -mount -t proc none /proc -mount -t sysfs none /sys -mount -t devpts none /dev/pts - -# install/remove packages -sudo apt-get update -sudo apt-get -y upgrade -sudo apt-get -y dist-upgrade -sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst -y - -#rm /etc/resolv.conf -#rm -rf /tmp/* - -umount /proc -umount /sys -umount /dev/pts \ No newline at end of file diff --git a/compass/ci/build.sh b/compass/ci/build.sh deleted file mode 100755 index 2b7fd9a..0000000 --- a/compass/ci/build.sh +++ /dev/null @@ -1,394 +0,0 @@ -#!/bin/bash -set -e -############################################################################## -# Copyright (c) 2015 Ericsson AB and others. -# stefan.k.berg@ericsson.com -# jonas.bjurel@ericsson.com -# chigang@huawei.com -# All rights reserved. This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# http://www.apache.org/licenses/LICENSE-2.0 -############################################################################## - -trap 'echo "Exiting ..."; \ -if [ -f ${LOCK_FILE} ]; then \ - if [ $(cat ${LOCK_FILE}) -eq $$ ]; then \ - rm -f ${LOCK_FILE}; \ - fi; \ -fi;' EXIT - -############################################################################ -# BEGIN of usage description -# -usage () -{ -cat << EOF -$0 Builds the Compass@OPNFV stack - -usage: $0 [-s spec-file] [-c cache-URI] [-l log-file] [-f Flags] build-directory - -OPTIONS: - -s spec-file ($BUILD_SPEC), define the build-spec file, default ../build/config.mk - -c cache base URI ($BUILD_CACHE_URI), specifies the base URI to a build cache to be used/updated - the name is automatically generated from the md5sum of the spec-file, http://, ftp://, file://[absolute path] supported.
- - -l log-file ($BUILD_LOG), specifies the output log-file (stdout and stderr), if not specified logs are output to console as normal - -v version tag to be applied to the build result - -r alternative remote access method script/program. curl is default. - -t run small build-script unit test. - -T run large build-script unit test. - -f build flags ($BUILD_FLAGS): - o s: Do nothing, succeed - o f: Do nothing, fail - o t: run build unit tests - o i: run interactive (-t flag to docker run) - o P: Populate a new local cache and push it to the (-c cache-URI) cache artifactory if the -c option is present, currently file://, http:// and ftp:// are supported - o d: Detach - NOT YET SUPPORTED - - build-directory ($BUILD_DIR), specifies the directory for the output artifacts (.iso file). - - -h help, prints this help text - -Description: -build.sh builds the OPNFV .iso artifact. -To reduce build time it uses a build cache in a local or remote location. The cache is rebuilt and uploaded if any of the conditions below is met: -1) The P(opulate) flag is set and the -c cache-base-URI is provided; if -c is not provided the cache will stay local. -2) If the cache is invalidated by one of the following conditions: - - The config spec md5sum does not match the md5sum of the spec with which the cache was built. - - The git Commit-Id on the remote repos/HEAD defined in the spec file does not correspond to the Commit-Id the cache was built with. -3) A valid cache does not exist at the specified -c cache-base-URI. - -The cache URI object name is compass_cache-"md5sum(spec file)" - -Logging goes to the console by default, but can be directed elsewhere with the -l option, in which case both stdout and stderr are redirected to that destination. - -Built-in unit testing of components is enabled by adding the t(est) flag. - -Return codes: - - 0 Success!
- - 1-99 Unspecified build error - - 100-199 Build system internal error (not the build itself) - o 101 Build system instance busy - - 200 Build failure - -Examples: -build -c http://opnfv.org/artifactory/compass/cache -d ~/jenkins/genesis/compass/ci/output -f ti -NOTE: At present the build scope is set to the git root of the repository; -d destination locations outside that scope will not work EOF } # # END of usage description ############################################################################ ############################################################################ # BEGIN of variables to customize # BUILD_BASE=$(readlink -e ../build/) -export RESULT_DIR="${BUILD_BASE}/release" -BUILD_SPEC="${BUILD_BASE}/config.mk" -CACHE_DIR="cache" -LOCAL_CACHE_ARCH_NAME="compass-cache" -REMOTE_CACHE_ARCH_NAME="compass_cache-$(md5sum ${BUILD_SPEC}| cut -f1 -d " ")" -REMOTE_ACCESS_METHD=curl -INCLUDE_DIR=../include -# -# END of variables to customize -############################################################################ - -############################################################################ -# BEGIN of script assigned variables -# -SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -LOCK_FILE="${SCRIPT_DIR}/.build.lck" -CACHE_TMP="${SCRIPT_DIR}/tmp" -TEST_SUCCEED=0 -TEST_FAIL=0 -UNIT_TEST=0 -UPDATE_CACHE=0 -POPULATE_CACHE=0 -RECURSIVE=0 -DETACH=0 -DEBUG=0 -INTEGRATION_TEST=0 -FULL_INTEGRATION_TEST=0 -INTERACTIVE=0 -export BUILD_CACHE_URI= -BUILD_SPEC= -BUILD_DIR= -BUILD_LOG= -export BUILD_VERSION= -MAKE_ARGS= -# -# END of script assigned variables -############################################################################ - -############################################################################ -# BEGIN of include pragmas -# -source ${INCLUDE_DIR}/build.sh.debug -# -# END of include -############################################################################ - -############################################################################ -# BEGIN of main -# -build_prepare - -while getopts "s:c:v:f:l:r:RtTh" OPTION -do - case $OPTION in - h) - usage - rc=0 - exit $rc - ;; - - s) - BUILD_SPEC=${OPTARG} - ;; - - c) - BUILD_CACHE_URI=${OPTARG} - ;; - - l) - BUILD_LOG=${OPTARG} - ;; - - v) - BUILD_VERSION=${OPTARG} - ;; - - f) - BUILD_FLAGS=${OPTARG} - ;; - - r) REMOTE_ACCESS_METHD=${OPTARG} - ;; - - R) - RECURSIVE=1 - ;; - - t) - INTEGRATION_TEST=1 - ;; - - T) - INTEGRATION_TEST=1 - FULL_INTEGRATION_TEST=1 - ;; - - *) - echo "${OPTION} is not a valid argument" - rc=100 - exit $rc - ;; - esac -done - -if [ -z $BUILD_DIR ]; then - BUILD_DIR=$(echo $@ | cut -d ' ' -f ${OPTIND}) -fi - -for ((i=0; i<${#BUILD_FLAGS};i++)); do - case ${BUILD_FLAGS:$i:1} in - s) - rc=0 - exit $rc - ;; - - f) - rc=1 - exit $rc - ;; - - t) - UNIT_TEST=1 - ;; - - i) - INTERACTIVE=1 - ;; - - P) - POPULATE_CACHE=1 - ;; - - d) - DETACH=1 - echo "Detach is not yet supported - exiting ...." - rc=100 - exit $rc - ;; - - D) - DEBUG=1 - ;; - - *) - echo "${BUILD_FLAGS:$i:1} is not a valid build flag - exiting ...." - rc=100 - exit $rc - ;; - esac -done - -if [ ${INTEGRATION_TEST} -eq 1 ]; then - integration-test - rc=0 - exit $rc -fi - -if [ ! -f ${BUILD_SPEC} ]; then - echo "spec file does not exist: $BUILD_SPEC - exiting ...." - rc=100 - exit $rc -fi - -if [ -z ${BUILD_DIR} ]; then - echo "Missing build directory - exiting ...." - rc=100 - exit $rc -fi - -if [ ! -z ${BUILD_LOG} ]; then - if [[ ${RECURSIVE} -ne 1 ]]; then - set +e - eval $0 -R $@ > ${BUILD_LOG} 2>&1 - rc=$?
- set -e - if [ $rc -ne 0 ]; then - exit $rc - fi - fi -fi - -if [ ${TEST_SUCCEED} -eq 1 ]; then - sleep 1 - rc=0 - exit $rc -fi - -if [ ${TEST_FAIL} -eq 1 ]; then - sleep 1 - rc=1 - exit $rc -fi - -if [ -e ${LOCK_FILE} ]; then - echo "A build job is already running, exiting....." - rc=101 - exit $rc -fi - -echo $$ > ${LOCK_FILE} - -if [ ! -z ${BUILD_CACHE_URI} ]; then - if [ ${POPULATE_CACHE} -ne 1 ]; then - rm -rf ${CACHE_TMP}/cache - mkdir -p ${CACHE_TMP}/cache - echo "Downloading cache file ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME} ..." - set +e - ${REMOTE_ACCESS_METHD} -o ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz - rc=$? - set -e - if [ $rc -ne 0 ]; then - echo "Remote cache does not exist, or is not accessible - a new cache will be built ..." - POPULATE_CACHE=1 - else - echo "Unpacking cache file ..." - tar -C ${CACHE_TMP}/cache -xvf ${CACHE_TMP}/cache/${LOCAL_CACHE_ARCH_NAME}.tgz - cp ${CACHE_TMP}/cache/cache/.versions ${BUILD_BASE}/. - set +e - make -C ${BUILD_BASE} validate-cache; - rc=$? - set -e - - if [ $rc -ne 0 ]; then - echo "Cache invalid - a new cache will be built " - POPULATE_CACHE=1 - else - cp -rf ${CACHE_TMP}/cache/cache/. ${BUILD_BASE} - fi - rm -rf ${CACHE_TMP}/cache - fi - fi -fi - -if [ ${POPULATE_CACHE} -eq 1 ]; then - if [ ${DEBUG} -eq 0 ]; then - set +e - cd ${BUILD_BASE} && make clean - rc=$? - set -e - if [ $rc -ne 0 ]; then - echo "Build - make clean failed, exiting ..." - rc=100 - exit $rc - fi - fi -fi - -if [ ! -z ${BUILD_VERSION} ]; then - MAKE_ARGS+="REVSTATE=${BUILD_VERSION} " -fi - -if [ ${UNIT_TEST} -eq 1 ]; then - MAKE_ARGS+="UNIT_TEST=TRUE " -else - MAKE_ARGS+="UNIT_TEST=FALSE " -fi - -if [ ${INTERACTIVE} -eq 1 ]; then - MAKE_ARGS+="INTERACTIVE=TRUE " -else - MAKE_ARGS+="INTERACTIVE=FALSE " -fi - -MAKE_ARGS+=all - -if [ ${DEBUG} -eq 0 ]; then - set +e - cd ${BUILD_BASE} && make ${MAKE_ARGS} - rc=$? - set -e - if [ $rc -gt 0 ]; then - echo "Build: make all failed, exiting ..." - rc=200 - exit $rc - fi -else -debug_make -fi -set +e -make -C ${BUILD_BASE} prepare-cache -rc=$? -set -e - -if [ $rc -gt 0 ]; then - echo "Build: make prepare-cache failed - exiting ..." - rc=100 - exit $rc -fi -echo "Copying built OPNFV .iso file to target directory ${BUILD_DIR} ..." -rm -rf ${BUILD_DIR} -mkdir -p ${BUILD_DIR} -cp ${BUILD_BASE}/.versions ${BUILD_DIR} -cp ${RESULT_DIR}/*.iso* ${BUILD_DIR} - -#if [ $POPULATE_CACHE -eq 1 ]; then -# if [ ! -z ${BUILD_CACHE_URI} ]; then -# echo "Building cache ..." -# tar --dereference -C ${BUILD_BASE} -caf ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${CACHE_DIR} -# echo "Uploading cache ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}" -# ${REMOTE_ACCESS_METHD} -T ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz ${BUILD_CACHE_URI}/${REMOTE_CACHE_ARCH_NAME}.tgz -# rm ${BUILD_BASE}/${LOCAL_CACHE_ARCH_NAME}.tgz -# fi -#fi -echo "Success!!!" -exit 0 -# -# END of main -############################################################################ diff --git a/compass/ci/deploy.sh b/compass/ci/deploy.sh deleted file mode 100755 index 197bf63..0000000 --- a/compass/ci/deploy.sh +++ /dev/null @@ -1,9 +0,0 @@ -#set -x -COMPASS_DIR=`cd ${BASH_SOURCE[0]%/*}/../;pwd` -export COMPASS_DIR - -apt-get install screen -screen -ls |grep deploy|awk -F.
'{print $1}'|xargs kill -9 -screen -wipe -#screen -dmSL deploy bash $COMPASS_DIR/ci/launch.sh $* -$COMPASS_DIR/ci/launch.sh $* diff --git a/compass/ci/launch.sh b/compass/ci/launch.sh deleted file mode 100755 index 316b06f..0000000 --- a/compass/ci/launch.sh +++ /dev/null @@ -1,65 +0,0 @@ -#set -x -WORK_DIR=$COMPASS_DIR/ci/work - -if [[ $# -ge 1 ]];then - CONF_NAME=$1 -else - CONF_NAME=cluster -fi - -source ${COMPASS_DIR}/ci/log.sh -source ${COMPASS_DIR}/deploy/conf/${CONF_NAME}.conf -source ${COMPASS_DIR}/deploy/prepare.sh -source ${COMPASS_DIR}/deploy/network.sh - -if [[ ! -z $VIRT_NUMBER ]];then - source ${COMPASS_DIR}/deploy/host_vm.sh -else - source ${COMPASS_DIR}/deploy/host_baremetal.sh -fi - -source ${COMPASS_DIR}/deploy/compass_vm.sh -source ${COMPASS_DIR}/deploy/deploy_host.sh - -######################### main process - -if ! prepare_env;then - echo "prepare_env failed" - exit 1 -fi - -log_info "########## get host mac begin #############" -machines=`get_host_macs` -if [[ -z $machines ]];then - log_error "get_host_macs failed" - exit 1 -fi - -log_info "deploy host macs: $machines" -export machines - -log_info "########## set up network begin #############" -if ! create_nets;then - log_error "create_nets failed" - exit 1 -fi - -if ! launch_compass;then - log_error "launch_compass failed" - exit 1 -fi -if [[ ! -z $VIRT_NUMBER ]];then - if ! launch_host_vms;then - log_error "launch_host_vms failed" - exit 1 - fi -fi -if ! deploy_host;then - #tear_down_machines - #tear_down_compass - exit 1 -else - #tear_down_machines - #tear_down_compass - exit 0 -fi diff --git a/compass/ci/log.sh b/compass/ci/log.sh deleted file mode 100755 index f54fdca..0000000 --- a/compass/ci/log.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -reset=`tput sgr0` -red=`tput setaf 1` -green=`tput setaf 2` -yellow=`tput setaf 3` - -function log_info() { - echo -e "${green}$*${reset}" -} - -function log_warn() { - echo -e "${yellow}$*${reset}" -} - -function log_error() { - echo -e "${red}$*${reset}" -} - -function log_progress() { - echo -en "${yellow}$*\r${reset}" -} - diff --git a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml b/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml deleted file mode 100644 index 9c1d7e7..0000000 --- a/compass/deploy/ansible/openstack_juno/HA-ansible-multinodes.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- hosts: all - remote_user: root - sudo: true - roles: - - repo - - common - -- hosts: ha - remote_user: root - sudo: True - roles: - - ha - -- hosts: controller - remote_user: root - sudo: True - roles: - - database - - mq - - keystone - - nova-controller - - neutron-controller - - cinder-controller - - glance - - neutron-common - - neutron-network - - dashboard - -- hosts: compute - remote_user: root - sudo: True - roles: - - nova-compute - - neutron-compute - - cinder-volume - -- hosts: all - remote_user: root - sudo: True - roles: - - monitor diff --git a/compass/deploy/ansible/openstack_juno/allinone.yml b/compass/deploy/ansible/openstack_juno/allinone.yml deleted file mode 100644 index 15220ca..0000000 --- a/compass/deploy/ansible/openstack_juno/allinone.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: all - remote_user: root - sudo: true - roles: - - repo - -- hosts: controller - sudo: True - roles: - - common - - database - - mq - - keystone - - nova-controller - - neutron-controller - - dashboard - - cinder-controller - - glance - -- hosts: network - sudo: True - roles: - - common - - neutron-network - -- hosts: storage - sudo: True - roles: - - 
common - - cinder-volume - -- hosts: compute - sudo: True - roles: - - common - - nova-compute - - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/compute.yml b/compass/deploy/ansible/openstack_juno/compute.yml deleted file mode 100644 index b2679c0..0000000 --- a/compass/deploy/ansible/openstack_juno/compute.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- hosts: all - remote_user: vagrant - sudo: True - roles: - - repo - - common - - nova-compute - - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/controller.yml b/compass/deploy/ansible/openstack_juno/controller.yml deleted file mode 100644 index 7f4a10e..0000000 --- a/compass/deploy/ansible/openstack_juno/controller.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- hosts: controller - remote_user: root - sudo: True - roles: - - repo - - common - - database - - mq - - keystone - - nova-controller - - neutron-controller - - dashboard - - cinder-controller - - glance diff --git a/compass/deploy/ansible/openstack_juno/group_vars/all b/compass/deploy/ansible/openstack_juno/group_vars/all deleted file mode 100644 index 5643fcd..0000000 --- a/compass/deploy/ansible/openstack_juno/group_vars/all +++ /dev/null @@ -1,54 +0,0 @@ -controller_host: 10.1.0.11 -network_host: 10.1.0.12 -compute_host: 10.1.0.13 -storage_host: 10.1.0.14 -odl_controller: 10.1.0.15 - -DEBUG: False -VERBOSE: False -NTP_SERVER_LOCAL: controller -DB_HOST: "{{ controller_host }}" -MQ_BROKER: rabbitmq - -OPENSTACK_REPO: cloudarchive-juno.list -ADMIN_TOKEN: admin -CEILOMETER_TOKEN: c095d479023a0fd58a54 -RABBIT_PASS: guest -KEYSTONE_DBPASS: keystone_db_secret -DEMO_PASS: demo_secret -ADMIN_PASS: admin_secret -GLANCE_DBPASS: glance_db_secret -GLANCE_PASS: glance_secret -NOVA_DBPASS: nova_db_secret -NOVA_PASS: nova_secret -DASH_DBPASS: dash_db_secret -CINDER_DBPASS: cinder_db_secret -CINDER_PASS: cinder_secret -NEUTRON_DBPASS: neutron_db_secret -NEUTRON_PASS: netron_secret -NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan'] -NEUTRON_TENANT_NETWORK_TYPES: ['vxlan'] -#NEUTRON_MECHANISM_DRIVERS: ['opendaylight'] -NEUTRON_MECHANISM_DRIVERS: ['openvswitch'] -NEUTRON_TUNNEL_TYPES: ['vxlan'] -METADATA_SECRET: metadata_secret -INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21 -INTERFACE_NAME: eth2 - -EXTERNAL_NETWORK_CIDR: 203.0.113.0/24 -EXTERNAL_NETWORK_GATEWAY: 203.0.113.1 -FLOATING_IP_START: 203.0.113.101 -FLOATING_IP_END: 203.0.113.200 - -juno_cloud_archive: deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main -build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img -build_in_image_name: cirros-0.3.3-x86_64-disk.img - -physical_device: /dev/sdb - -internal_interface: ansible_eth1 -internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}" - -odl_username: admin -odl_password: admin -odl_api_port: 8080 diff --git a/compass/deploy/ansible/openstack_juno/multinodes.yml b/compass/deploy/ansible/openstack_juno/multinodes.yml deleted file mode 100644 index ffd29d5..0000000 --- a/compass/deploy/ansible/openstack_juno/multinodes.yml +++ /dev/null @@ -1,75 +0,0 @@ ---- -- hosts: all - remote_user: root - sudo: true - roles: - - repo - -- hosts: database - sudo: True - roles: - - common - - database - -- hosts: messaging - sudo: True - roles: - - common - - mq - -- hosts: identity - sudo: True - roles: - - common - - keystone - -- hosts: compute-controller - sudo: True - roles: - - common - - nova-controller - -- hosts: network-server - sudo: True - roles: - - common - - neutron-controller 
- -- hosts: storage-controller - sudo: True - roles: - - common - - cinder-controller - -- hosts: image - sudo: True - roles: - - common - - glance - -- hosts: dashboard - sudo: True - roles: - - common - - dashboard - -- hosts: network-worker - sudo: True - roles: - - common - - neutron-network - -- hosts: storage-volume - sudo: True - roles: - - common - - cinder-volume - -- hosts: compute-worker - sudo: True - roles: - - common - - nova-compute - - neutron-compute - - diff --git a/compass/deploy/ansible/openstack_juno/network.yml b/compass/deploy/ansible/openstack_juno/network.yml deleted file mode 100644 index 558f317..0000000 --- a/compass/deploy/ansible/openstack_juno/network.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- hosts: all - remote_user: vagrant - sudo: True - roles: - - repo - - common - - neutron-network diff --git a/compass/deploy/ansible/openstack_juno/single-controller.yml b/compass/deploy/ansible/openstack_juno/single-controller.yml deleted file mode 100644 index 15220ca..0000000 --- a/compass/deploy/ansible/openstack_juno/single-controller.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: all - remote_user: root - sudo: true - roles: - - repo - -- hosts: controller - sudo: True - roles: - - common - - database - - mq - - keystone - - nova-controller - - neutron-controller - - dashboard - - cinder-controller - - glance - -- hosts: network - sudo: True - roles: - - common - - neutron-network - -- hosts: storage - sudo: True - roles: - - common - - cinder-volume - -- hosts: compute - sudo: True - roles: - - common - - nova-compute - - neutron-compute diff --git a/compass/deploy/ansible/openstack_juno/storage.yml b/compass/deploy/ansible/openstack_juno/storage.yml deleted file mode 100644 index 3c0aa41..0000000 --- a/compass/deploy/ansible/openstack_juno/storage.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- hosts: all - remote_user: vagrant - sudo: True - roles: - - repo - - common - - cinder-volume diff --git a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml b/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml deleted file mode 100644 index ef671dd..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: restart cinder-scheduler - service: name=cinder-scheduler state=restarted enabled=yes -- name: restart cinder-api - service: name=cinder-api state=restarted enabled=yes - diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml deleted file mode 100644 index 7796cf7..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_config.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: sync cinder db - shell: su -s /bin/sh -c "cinder-manage db sync" cinder && cinder - register: result - until: result.rc == 0 - retries: 5 - delay: 3 - notify: - - restart cinder-scheduler - - restart cinder-api - -- meta: flush_handlers - -- name: upload cinder keystone register script - template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744 - -- name: run cinder register script - shell: for i in {0..5}; do /opt/cinder_init.sh && touch cinder_init_complete; if [ $? 
!= 0 ]; then sleep 5; else break; fi; done - args: - creates: cinder_init_complete diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml deleted file mode 100644 index 03ad432..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/tasks/cinder_install.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: install cinder packages - apt: name={{ item }} state=present force=yes - with_items: - - cinder-api - - cinder-scheduler - - python-cinderclient - -- name: generate cinder service list - shell: echo {{ item }} >> /opt/service - with_items: - - cinder-api - - cinder-scheduler - -- name: upload cinder conf - template: src=cinder.conf dest=/etc/cinder/cinder.conf - notify: - - restart cinder-scheduler - - restart cinder-api - diff --git a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml b/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml deleted file mode 100644 index 1dbe91f..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- include: cinder_install.yml - tags: - - install - - cinder-install - - cinder - -- include: cinder_config.yml - when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' - tags: - - config - - cinder-config - - cinder diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini b/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini deleted file mode 100644 index b568a17..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/templates/api-paste.ini +++ /dev/null @@ -1,71 +0,0 @@ -############# -# OpenStack # -############# - -[composite:osapi_volume] -use = call:cinder.api:root_app_factory -/: apiversions -/v1: openstack_volume_api_v1 -/v2: openstack_volume_api_v2 - -[composite:openstack_volume_api_v1] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = request_id faultwrap sizelimit osprofiler noauth apiv1 -keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 -keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1 - -[composite:openstack_volume_api_v2] -use = call:cinder.api.middleware.auth:pipeline_factory -noauth = request_id faultwrap sizelimit osprofiler noauth apiv2 -keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 -keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2 - -[filter:request_id] -paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory - -[filter:faultwrap] -paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory - -[filter:osprofiler] -paste.filter_factory = osprofiler.web:WsgiMiddleware.factory -hmac_keys = SECRET_KEY -enabled = yes - -[filter:noauth] -paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory - -[filter:sizelimit] -paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory - -[app:apiv1] -paste.app_factory = cinder.api.v1.router:APIRouter.factory - -[app:apiv2] -paste.app_factory = cinder.api.v2.router:APIRouter.factory - -[pipeline:apiversions] -pipeline = faultwrap osvolumeversionapp - -[app:osvolumeversionapp] -paste.app_factory = cinder.api.versions:Versions.factory - -[filter:authtoken] -paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory -# auth_host = 127.0.0.1 -# auth_port = 35357 -# 
auth_protocol = http -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = cinder -admin_password = {{ CINDER_PASS }} - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystonemiddleware.auth_token:filter_factory diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf deleted file mode 100644 index e34fd2f..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/templates/cinder.conf +++ /dev/null @@ -1,63 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -iscsi_helper = tgtadm -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = {{ VERBOSE }} -debug = {{ DEBUG }} -auth_strategy = keystone -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -notification_driver=cinder.openstack.common.notifier.rpc_notifier -volumes_dir = /var/lib/cinder/volumes - -log_file=/var/log/cinder/cinder.log - -control_exchange = cinder -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_port = 5672 -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} -my_ip = {{ storage_controller_host }} - -glance_host = {{ HA_VIP }} -glance_port = 9292 -api_rate_limit = False -storage_availability_zone = nova - -quota_volumes = 10 -quota_gigabytes=1000 -quota_driver=cinder.quota.DbQuotaDriver - -osapi_volume_listen = {{ storage_controller_host }} -osapi_volume_listen_port = 8776 - -db_backend = sqlalchemy -volume_name_template = volume-%s -snapshot_name_template = snapshot-%s - -max_gigabytes=10000 -volume_group=cinder-volumes - -volume_clear=zero -volume_clear_size=10 - -iscsi_ip_address={{ storage_controller_host }} -iscsi_port=3260 -iscsi_helper=tgtadm - -volumes_dir=/var/lib/cinder/volumes - -volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = cinder -admin_password = {{ CINDER_PASS }} - -[database] -connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder diff --git a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh b/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh deleted file mode 100644 index 0ec61b6..0000000 --- a/compass/deploy/ansible/roles/cinder-controller/templates/cinder_init.sh +++ /dev/null @@ -1,6 +0,0 @@ -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin - -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8776/v1/%\(tenant_id\)s 
- diff --git a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml b/compass/deploy/ansible/roles/cinder-volume/files/loop.yml deleted file mode 100644 index e872652..0000000 --- a/compass/deploy/ansible/roles/cinder-volume/files/loop.yml +++ /dev/null @@ -1 +0,0 @@ -physical_device: /dev/loop0 diff --git a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml b/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml deleted file mode 100644 index ad917ce..0000000 --- a/compass/deploy/ansible/roles/cinder-volume/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: restart cinder-volume - service: name=cinder-volume state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml b/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml deleted file mode 100644 index 8c0e626..0000000 --- a/compass/deploy/ansible/roles/cinder-volume/tasks/main.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- name: install cinder-volume and lvm2 packages - apt: name={{ item }} state=present force=yes - with_items: - - cinder-volume - - lvm2 - -- name: generate cinder volume service list - shell: echo {{ item }} >> /opt/service - with_items: - - cinder-volume - -- name: check if physical device exists - stat: path={{ physical_device }} - register: st - -- name: replace physical_device if st is false - local_action: copy src=loop.yml dest=/tmp/loop.yml - when: st.stat.exists == False - -- name: load loop.yml - include_vars: /tmp/loop.yml - when: st.stat.exists == False - -- name: check if cinder-volumes is mounted - shell: ls /mnt - register: cindervolumes - -- name: get available partition size - shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }' - register: partition_size - -- name: if not mounted, mount it - shell: dd if=/dev/zero of=/mnt/cinder-volumes - bs=1 count=0 seek={{ partition_size.stdout }} - when: cindervolumes.stdout != 'cinder-volumes' - -- name: get first lo device - shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p - register: first_lo - when: cindervolumes.stdout != 'cinder-volumes' - -- name: do a losetup on /mnt/cinder-volumes - shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes - when: cindervolumes.stdout != 'cinder-volumes' - -- name: create physical and group volumes - lvg: vg=cinder-volumes pvs={{ physical_device }} - vg_options=--force - -- name: upload cinder-volume configuration - template: src=cinder.conf dest=/etc/cinder/cinder.conf - backup=yes - notify: - - restart cinder-volume diff --git a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf b/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf deleted file mode 100644 index aa3b8cc..0000000 --- a/compass/deploy/ansible/roles/cinder-volume/templates/cinder.conf +++ /dev/null @@ -1,62 +0,0 @@ -[DEFAULT] -rootwrap_config = /etc/cinder/rootwrap.conf -api_paste_confg = /etc/cinder/api-paste.ini -iscsi_helper = tgtadm -volume_name_template = volume-%s -volume_group = cinder-volumes -verbose = True -auth_strategy = keystone -state_path = /var/lib/cinder -lock_path = /var/lock/cinder -notification_driver=cinder.openstack.common.notifier.rpc_notifier -volumes_dir = /var/lib/cinder/volumes - -log_file=/var/log/cinder/cinder.log - -control_exchange = cinder -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_port = 5672 -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} -my_ip = {{ storage_controller_host }} - -glance_host = {{ HA_VIP }} -glance_port = 9292 -api_rate_limit = False -storage_availability_zone = nova -
-quota_volumes = 10 -quota_gigabytes=1000 -quota_driver=cinder.quota.DbQuotaDriver - -osapi_volume_listen = {{ storage_controller_host }} -osapi_volume_listen_port = 8776 - -db_backend = sqlalchemy -volume_name_template = volume-%s -snapshot_name_template = snapshot-%s - -max_gigabytes=10000 -volume_group=cinder-volumes - -volume_clear=zero -volume_clear_size=10 - -iscsi_ip_address={{ storage_controller_host }} -iscsi_port=3260 -iscsi_helper=tgtadm - -volumes_dir=/var/lib/cinder/volumes - -volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = cinder -admin_password = {{ CINDER_PASS }} - -[database] -connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder diff --git a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list b/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list deleted file mode 100644 index 920f3d2..0000000 --- a/compass/deploy/ansible/roles/common/files/sources.list.d/cloudarchive-juno.list +++ /dev/null @@ -1 +0,0 @@ -deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main diff --git a/compass/deploy/ansible/roles/common/tasks/main.yml b/compass/deploy/ansible/roles/common/tasks/main.yml deleted file mode 100644 index ce595f5..0000000 --- a/compass/deploy/ansible/roles/common/tasks/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: install ubuntu-cloud-keyring(ubuntu) - apt: name={{ item }} state=latest - with_items: - - ubuntu-cloud-keyring - -- name: update hosts files to all hosts - template: src=hosts - dest=/etc/hosts - backup=yes - -- name: install common packages - apt: name={{ item }} state=latest - with_items: - - python-pip - - python-dev - - python-mysqldb - - ntp - -- name: restart ntp - command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc" - ignore_errors: True - -- name: update ntp conf - template: src=ntp.conf dest=/etc/ntp.conf backup=yes - -- name: restart ntp - service: name=ntp state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/common/templates/hosts b/compass/deploy/ansible/roles/common/templates/hosts deleted file mode 100644 index 9d27c0a..0000000 --- a/compass/deploy/ansible/roles/common/templates/hosts +++ /dev/null @@ -1,22 +0,0 @@ -# compute-controller -10.145.89.136 host-136 -# database -10.145.89.136 host-136 -# messaging -10.145.89.136 host-136 -# storage-controller -10.145.89.138 host-138 -# image -10.145.89.138 host-138 -# identity -10.145.89.136 host-136 -# network-server -10.145.89.138 host-138 -# dashboard -10.145.89.136 host-136 -# storage-volume -10.145.89.139 host-139 -# network-worker -10.145.89.139 host-139 -# compute-worker -10.145.89.137 host-137 diff --git a/compass/deploy/ansible/roles/common/templates/ntp.conf b/compass/deploy/ansible/roles/common/templates/ntp.conf deleted file mode 100644 index c613809..0000000 --- a/compass/deploy/ansible/roles/common/templates/ntp.conf +++ /dev/null @@ -1,56 +0,0 @@ -# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help - -driftfile /var/lib/ntp/ntp.drift - - -# Enable this if you want statistics to be logged. -#statsdir /var/log/ntpstats/ - -statistics loopstats peerstats clockstats -filegen loopstats file loopstats type day enable -filegen peerstats file peerstats type day enable -filegen clockstats file clockstats type day enable - -# Specify one or more NTP servers. 
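The hosts file and the ntp.conf that begins above are Jinja2 templates; Ansible's template module substitutes variables such as {{ NTP_SERVER_LOCAL }} at deploy time. A standalone rendering sketch using the jinja2 library directly, with an example server address that is not taken from the playbooks:

from jinja2 import Template

NTP_CONF_SNIPPET = (
    "server {{ NTP_SERVER_LOCAL }}\n"
    "server 0.ubuntu.pool.ntp.org\n"
)

def render(snippet, **variables):
    # Ansible templating is Jinja2 under the hood, so a plain Template
    # reproduces the substitution the common role performs on ntp.conf.
    return Template(snippet).render(**variables)

if __name__ == "__main__":
    print(render(NTP_CONF_SNIPPET, NTP_SERVER_LOCAL="10.145.89.136"))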
- -# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board -# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for -# more information. -server {{ NTP_SERVER_LOCAL }} -server 0.ubuntu.pool.ntp.org -server 1.ubuntu.pool.ntp.org -server 2.ubuntu.pool.ntp.org -server 3.ubuntu.pool.ntp.org - -# Use Ubuntu's ntp server as a fallback. -server ntp.ubuntu.com - -# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for -# details. The web page -# might also be helpful. -# -# Note that "restrict" applies to both servers and clients, so a configuration -# that might be intended to block requests from certain clients could also end -# up blocking replies from your own upstream servers. - -# By default, exchange time with everybody, but don't allow configuration. -restrict -4 default kod notrap nomodify nopeer noquery -restrict -6 default kod notrap nomodify nopeer noquery - -# Local users may interrogate the ntp server more closely. -restrict 127.0.0.1 -restrict ::1 - -# Clients from this (example!) subnet have unlimited access, but only if -# cryptographically authenticated. -#restrict 192.168.123.0 mask 255.255.255.0 notrust - - -# If you want to provide time to your local subnet, change the next line. -# (Again, the address is an example only.) -#broadcast 192.168.123.255 - -# If you want to listen to time broadcasts on your local subnet, de-comment the -# next lines. Please do this only if you trust everybody on the network! -#disable auth -#broadcastclient diff --git a/compass/deploy/ansible/roles/dashboard/tasks/main.yml b/compass/deploy/ansible/roles/dashboard/tasks/main.yml deleted file mode 100644 index 465b996..0000000 --- a/compass/deploy/ansible/roles/dashboard/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: install dashboard packages - apt: name={{ item }} state=present force=yes - with_items: - - apache2 - - memcached - - libapache2-mod-wsgi - - openstack-dashboard - -- name: remove ubuntu theme - apt: name=openstack-dashboard-ubuntu-theme - state=absent - -## horizon configuration is already enabled in apache2/conf-enabled -## by openstack-dashboard package deploy script. -#- name: update dashboard conf -# template: src=openstack-dashboard.conf -# dest=/etc/apache2/sites-available/openstack-dashboard.conf -# backup=yes - -- name: update horizon settings - template: src=local_settings.py - dest=/etc/openstack-dashboard/local_settings.py - backup=yes - -- name: restart apache2 - service: name=apache2 state=restarted enabled=yes - -- name: restart memcached - service: name=memcached state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py b/compass/deploy/ansible/roles/dashboard/templates/local_settings.py deleted file mode 100644 index 87e06e3..0000000 --- a/compass/deploy/ansible/roles/dashboard/templates/local_settings.py +++ /dev/null @@ -1,511 +0,0 @@ -import os - -from django.utils.translation import ugettext_lazy as _ - -from openstack_dashboard import exceptions - -DEBUG = True -TEMPLATE_DEBUG = DEBUG - -# Required for Django 1.5. -# If horizon is running in production (DEBUG is False), set this -# with the list of host/domain names that the application can serve. 
-# For more information see: -# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts -#ALLOWED_HOSTS = ['horizon.example.com', ] - -# Set SSL proxy settings: -# For Django 1.4+ pass this header from the proxy after terminating the SSL, -# and don't forget to strip it from the client's request. -# For more information see: -# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header -# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') - -# If Horizon is being served through SSL, then uncomment the following two -# settings to better secure the cookies from security exploits -#CSRF_COOKIE_SECURE = True -#SESSION_COOKIE_SECURE = True - -# Overrides for OpenStack API versions. Use this setting to force the -# OpenStack dashboard to use a specific API version for a given service API. -# NOTE: The version should be formatted as it appears in the URL for the -# service API. For example, The identity service APIs have inconsistent -# use of the decimal point, so valid options would be "2.0" or "3". -# OPENSTACK_API_VERSIONS = { -# "identity": 3, -# "volume": 2 -# } - -# Set this to True if running on multi-domain model. When this is enabled, it -# will require user to enter the Domain name in addition to username for login. -# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False - -# Overrides the default domain used when running on single-domain model -# with Keystone V3. All entities will be created in the default domain. -# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default' - -# Set Console type: -# valid options would be "AUTO", "VNC", "SPICE" or "RDP" -# CONSOLE_TYPE = "AUTO" - -# Default OpenStack Dashboard configuration. -HORIZON_CONFIG = { - 'dashboards': ('project', 'admin', 'settings',), - 'default_dashboard': 'project', - 'user_home': 'openstack_dashboard.views.get_user_home', - 'ajax_queue_limit': 10, - 'auto_fade_alerts': { - 'delay': 3000, - 'fade_duration': 1500, - 'types': ['alert-success', 'alert-info'] - }, - 'help_url': "http://docs.openstack.org", - 'exceptions': {'recoverable': exceptions.RECOVERABLE, - 'not_found': exceptions.NOT_FOUND, - 'unauthorized': exceptions.UNAUTHORIZED}, -} - -# Specify a regular expression to validate user passwords. -# HORIZON_CONFIG["password_validator"] = { -# "regex": '.*', -# "help_text": _("Your password does not meet the requirements.") -# } - -# Disable simplified floating IP address management for deployments with -# multiple floating IP pools or complex network requirements. -# HORIZON_CONFIG["simple_ip_management"] = False - -# Turn off browser autocompletion for the login form if so desired. -# HORIZON_CONFIG["password_autocomplete"] = "off" - -LOCAL_PATH = os.path.dirname(os.path.abspath(__file__)) - -# Set custom secret key: -# You can either set it to a specific value or you can let horizion generate a -# default secret key that is unique on this machine, e.i. regardless of the -# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However, there -# may be situations where you would want to set this explicitly, e.g. when -# multiple dashboard instances are distributed on different machines (usually -# behind a load-balancer). Either you have to make sure that a session gets all -# requests routed to the same dashboard instance or you set the same SECRET_KEY -# for all of them. 
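The comment block above describes the multi-instance SECRET_KEY problem, and this template resolves it by hard-coding one value so every dashboard node signs sessions identically. As a hedged stdlib sketch of the generate-once-then-reuse alternative the comment alludes to (the key-store path below is an assumption, not a path from the template):

import os

def generate_or_read_secret_key(
        path="/var/lib/openstack-dashboard/.secret_key_store"):
    # First process to start writes the key; later processes re-read it,
    # so all WSGI workers on the host agree on one signing key.
    if os.path.exists(path):
        with open(path) as handle:
            return handle.read()
    key = os.urandom(32).hex()
    with open(path, "w") as handle:
        handle.write(key)
    os.chmod(path, 0o600)
    return key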
-from horizon.utils import secret_key -SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA' -# We recommend you use memcached for development; otherwise after every reload -# of the django development server, you will have to login again. To use -# memcached set CACHES to something like -CACHES = { - 'default': { - 'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION' : '127.0.0.1:11211', - } -} - -#CACHES = { -# 'default': { -# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache' -# } -#} - -# Enable the Ubuntu theme if it is present. -try: - from ubuntu_theme import * -except ImportError: - pass - -# Default Ubuntu apache configuration uses /horizon as the application root. -# Configure auth redirects here accordingly. -LOGIN_URL='/horizon/auth/login/' -LOGOUT_URL='/horizon/auth/logout/' -LOGIN_REDIRECT_URL='/horizon' - -# The Ubuntu package includes pre-compressed JS and compiled CSS to allow -# offline compression by default. To enable online compression, install -# the node-less package and enable the following option. -COMPRESS_OFFLINE = True - -# By default, validation of the HTTP Host header is disabled. Production -# installations should have this set accordingly. For more information -# see https://docs.djangoproject.com/en/dev/ref/settings/. -ALLOWED_HOSTS = ['{{ dashboard_host }}'] - -# Send email to the console by default -EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' -# Or send them to /dev/null -#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' - -# Configure these for your outgoing email host -# EMAIL_HOST = 'smtp.my-company.com' -# EMAIL_PORT = 25 -# EMAIL_HOST_USER = 'djangomail' -# EMAIL_HOST_PASSWORD = 'top-secret!' - -# For multiple regions uncomment this configuration, and add (endpoint, title). -# AVAILABLE_REGIONS = [ -# ('http://cluster1.example.com:5000/v2.0', 'cluster1'), -# ('http://cluster2.example.com:5000/v2.0', 'cluster2'), -# ] - -OPENSTACK_HOST = "{{ HA_VIP }}" -OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST -OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" - -# Disable SSL certificate checks (useful for self-signed certificates): -# OPENSTACK_SSL_NO_VERIFY = True - -# The CA certificate to use to verify SSL connections -# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem' - -# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the -# capabilities of the auth backend for Keystone. -# If Keystone has been configured to use LDAP as the auth backend then set -# can_edit_user to False and name to 'ldap'. -# -# TODO(tres): Remove these once Keystone has an API to identify auth backend. -OPENSTACK_KEYSTONE_BACKEND = { - 'name': 'native', - 'can_edit_user': True, - 'can_edit_group': True, - 'can_edit_project': True, - 'can_edit_domain': True, - 'can_edit_role': True -} - -#Setting this to True, will add a new "Retrieve Password" action on instance, -#allowing Admin session password retrieval/decryption. -#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False - -# The Xen Hypervisor has the ability to set the mount point for volumes -# attached to instances (other Hypervisors currently do not). Setting -# can_set_mount_point to True will add the option to set the mount point -# from the UI. -OPENSTACK_HYPERVISOR_FEATURES = { - 'can_set_mount_point': False, - 'can_set_password': False, -} - -# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional -# services provided by neutron. Options currently available are load -# balancer service, security groups, quotas, VPN service. 
-OPENSTACK_NEUTRON_NETWORK = { - 'enable_lb': False, - 'enable_firewall': False, - 'enable_quotas': True, - 'enable_vpn': False, - # The profile_support option is used to detect if an external router can be - # configured via the dashboard. When using specific plugins the - # profile_support can be turned on if needed. - 'profile_support': None, - #'profile_support': 'cisco', -} - -# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features -# in the OpenStack Dashboard related to the Image service, such as the list -# of supported image formats. -# OPENSTACK_IMAGE_BACKEND = { -# 'image_formats': [ -# ('', ''), -# ('aki', _('AKI - Amazon Kernel Image')), -# ('ami', _('AMI - Amazon Machine Image')), -# ('ari', _('ARI - Amazon Ramdisk Image')), -# ('iso', _('ISO - Optical Disk Image')), -# ('qcow2', _('QCOW2 - QEMU Emulator')), -# ('raw', _('Raw')), -# ('vdi', _('VDI')), -# ('vhd', _('VHD')), -# ('vmdk', _('VMDK')) -# ] -# } - -# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for -# image custom property attributes that appear on image detail pages. -IMAGE_CUSTOM_PROPERTY_TITLES = { - "architecture": _("Architecture"), - "kernel_id": _("Kernel ID"), - "ramdisk_id": _("Ramdisk ID"), - "image_state": _("Euca2ools state"), - "project_id": _("Project ID"), - "image_type": _("Image Type") -} - -# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is 'publicURL'. -#OPENSTACK_ENDPOINT_TYPE = "publicURL" - -# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the -# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints -# in the Keystone service catalog. Use this setting when Horizon is running -# external to the OpenStack environment. The default is None. This -# value should differ from OPENSTACK_ENDPOINT_TYPE if used. -#SECONDARY_ENDPOINT_TYPE = "publicURL" - -# The number of objects (Swift containers/objects or images) to display -# on a single page before providing a paging element (a "more" link) -# to paginate results. -API_RESULT_LIMIT = 1000 -API_RESULT_PAGE_SIZE = 20 - -# The timezone of the server. This should correspond with the timezone -# of your entire OpenStack installation, and hopefully be in UTC. -TIME_ZONE = "UTC" - -# When launching an instance, the menu of available flavors is -# sorted by RAM usage, ascending. If you would like a different sort order, -# you can provide another flavor attribute as sorting key. Alternatively, you -# can provide a custom callback method to use for sorting. You can also provide -# a flag for reverse sort. For more info, see -# http://docs.python.org/2/library/functions.html#sorted -# CREATE_INSTANCE_FLAVOR_SORT = { -# 'key': 'name', -# # or -# 'key': my_awesome_callback_method, -# 'reverse': False, -# } - -# The Horizon Policy Enforcement engine uses these values to load per service -# policy rule files. The content of these files should match the files the -# OpenStack services are using to determine role based access control in the -# target installation. - -# Path to directory containing policy.json files -#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf") -# Map of local copy of service policy files -#POLICY_FILES = { -# 'identity': 'keystone_policy.json', -# 'compute': 'nova_policy.json', -# 'volume': 'cinder_policy.json', -# 'image': 'glance_policy.json', -#} - -# Trove user and database extension support. 
By default support for -# creating users and databases on database instances is turned on. -# To disable these extensions set the permission here to something -# unusable such as ["!"]. -# TROVE_ADD_USER_PERMS = [] -# TROVE_ADD_DATABASE_PERMS = [] - -LOGGING = { - 'version': 1, - # When set to True this will disable all logging except - # for loggers specified in this configuration dictionary. Note that - # if nothing is specified here and disable_existing_loggers is True, - # django.db.backends will still log unless it is disabled explicitly. - 'disable_existing_loggers': False, - 'handlers': { - 'null': { - 'level': 'DEBUG', - 'class': 'django.utils.log.NullHandler', - }, - 'console': { - # Set the level to "DEBUG" for verbose output logging. - 'level': 'INFO', - 'class': 'logging.StreamHandler', - }, - }, - 'loggers': { - # Logging from django.db.backends is VERY verbose, send to null - # by default. - 'django.db.backends': { - 'handlers': ['null'], - 'propagate': False, - }, - 'requests': { - 'handlers': ['null'], - 'propagate': False, - }, - 'horizon': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_dashboard': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'novaclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'cinderclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'keystoneclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'glanceclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'neutronclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'heatclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'ceilometerclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'troveclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'swiftclient': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'openstack_auth': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'nose.plugins.manager': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'django': { - 'handlers': ['console'], - 'level': 'DEBUG', - 'propagate': False, - }, - 'iso8601': { - 'handlers': ['null'], - 'propagate': False, - }, - } -} - -# 'direction' should not be specified for all_tcp/udp/icmp. -# It is specified in the form. 
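The LOGGING mapping above is a standard dictConfig schema, so it behaves the same outside Django. A trimmed, self-contained sketch that wires one client-library logger the way the template does, to show the handler/logger/propagate mechanics in isolation:

import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {"level": "INFO", "class": "logging.StreamHandler"},
    },
    "loggers": {
        # Mirrors the template's pattern: route a chatty client library
        # to the console handler and stop propagation to the root logger.
        "novaclient": {"handlers": ["console"], "level": "DEBUG",
                       "propagate": False},
    },
}

logging.config.dictConfig(LOGGING)
logging.getLogger("novaclient").info("handler wired via dictConfig")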
-SECURITY_GROUP_RULES = { - 'all_tcp': { - 'name': 'ALL TCP', - 'ip_protocol': 'tcp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_udp': { - 'name': 'ALL UDP', - 'ip_protocol': 'udp', - 'from_port': '1', - 'to_port': '65535', - }, - 'all_icmp': { - 'name': 'ALL ICMP', - 'ip_protocol': 'icmp', - 'from_port': '-1', - 'to_port': '-1', - }, - 'ssh': { - 'name': 'SSH', - 'ip_protocol': 'tcp', - 'from_port': '22', - 'to_port': '22', - }, - 'smtp': { - 'name': 'SMTP', - 'ip_protocol': 'tcp', - 'from_port': '25', - 'to_port': '25', - }, - 'dns': { - 'name': 'DNS', - 'ip_protocol': 'tcp', - 'from_port': '53', - 'to_port': '53', - }, - 'http': { - 'name': 'HTTP', - 'ip_protocol': 'tcp', - 'from_port': '80', - 'to_port': '80', - }, - 'pop3': { - 'name': 'POP3', - 'ip_protocol': 'tcp', - 'from_port': '110', - 'to_port': '110', - }, - 'imap': { - 'name': 'IMAP', - 'ip_protocol': 'tcp', - 'from_port': '143', - 'to_port': '143', - }, - 'ldap': { - 'name': 'LDAP', - 'ip_protocol': 'tcp', - 'from_port': '389', - 'to_port': '389', - }, - 'https': { - 'name': 'HTTPS', - 'ip_protocol': 'tcp', - 'from_port': '443', - 'to_port': '443', - }, - 'smtps': { - 'name': 'SMTPS', - 'ip_protocol': 'tcp', - 'from_port': '465', - 'to_port': '465', - }, - 'imaps': { - 'name': 'IMAPS', - 'ip_protocol': 'tcp', - 'from_port': '993', - 'to_port': '993', - }, - 'pop3s': { - 'name': 'POP3S', - 'ip_protocol': 'tcp', - 'from_port': '995', - 'to_port': '995', - }, - 'ms_sql': { - 'name': 'MS SQL', - 'ip_protocol': 'tcp', - 'from_port': '1433', - 'to_port': '1433', - }, - 'mysql': { - 'name': 'MYSQL', - 'ip_protocol': 'tcp', - 'from_port': '3306', - 'to_port': '3306', - }, - 'rdp': { - 'name': 'RDP', - 'ip_protocol': 'tcp', - 'from_port': '3389', - 'to_port': '3389', - }, -} - -FLAVOR_EXTRA_KEYS = { - 'flavor_keys': [ - ('quota:read_bytes_sec', _('Quota: Read bytes')), - ('quota:write_bytes_sec', _('Quota: Write bytes')), - ('quota:cpu_quota', _('Quota: CPU')), - ('quota:cpu_period', _('Quota: CPU period')), - ('quota:inbound_average', _('Quota: Inbound average')), - ('quota:outbound_average', _('Quota: Outbound average')), - ] -} - diff --git a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf b/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf deleted file mode 100644 index a5a791a..0000000 --- a/compass/deploy/ansible/roles/dashboard/templates/openstack-dashboard.conf +++ /dev/null @@ -1,14 +0,0 @@ - - -WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi -WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10 -Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ - - -Order allow,deny -Allow from all - - - - - diff --git a/compass/deploy/ansible/roles/database/files/my.cnf b/compass/deploy/ansible/roles/database/files/my.cnf deleted file mode 100644 index d61f947..0000000 --- a/compass/deploy/ansible/roles/database/files/my.cnf +++ /dev/null @@ -1,131 +0,0 @@ -# -# The MySQL database server configuration file. -# -# You can copy this to one of: -# - "/etc/mysql/my.cnf" to set global options, -# - "~/.my.cnf" to set user-specific options. -# -# One can use all long options that the program supports. -# Run program with --help to get a list of available options and with -# --print-defaults to see which it would actually understand and use. 
-# -# For explanations see -# http://dev.mysql.com/doc/mysql/en/server-system-variables.html - -# This will be passed to all mysql clients -# It has been reported that passwords should be enclosed with ticks/quotes -# escpecially if they contain "#" chars... -# Remember to edit /etc/mysql/debian.cnf when changing the socket location. -[client] -port = 3306 -socket = /var/run/mysqld/mysqld.sock - -# Here is entries for some specific programs -# The following values assume you have at least 32M ram - -# This was formally known as [safe_mysqld]. Both versions are currently parsed. -[mysqld_safe] -socket = /var/run/mysqld/mysqld.sock -nice = 0 - -[mysqld] -# -# * Basic Settings -# -user = mysql -pid-file = /var/run/mysqld/mysqld.pid -socket = /var/run/mysqld/mysqld.sock -port = 3306 -basedir = /usr -datadir = /var/lib/mysql -tmpdir = /tmp -lc-messages-dir = /usr/share/mysql -skip-external-locking -# -# Instead of skip-networking the default is now to listen only on -# localhost which is more compatible and is not less secure. -bind-address = 0.0.0.0 -# -# * Fine Tuning -# -key_buffer = 16M -max_allowed_packet = 16M -thread_stack = 192K -thread_cache_size = 8 -# This replaces the startup script and checks MyISAM tables if needed -# the first time they are touched -myisam-recover = BACKUP -#max_connections = 100 -#table_cache = 64 -#thread_concurrency = 10 -# -# * Query Cache Configuration -# -query_cache_limit = 1M -query_cache_size = 16M -# -# * Logging and Replication -# -# Both location gets rotated by the cronjob. -# Be aware that this log type is a performance killer. -# As of 5.1 you can enable the log at runtime! -#general_log_file = /var/log/mysql/mysql.log -#general_log = 1 -# -# Error log - should be very few entries. -# -log_error = /var/log/mysql/error.log -# -# Here you can see queries with especially long duration -#log_slow_queries = /var/log/mysql/mysql-slow.log -#long_query_time = 2 -#log-queries-not-using-indexes -# -# The following can be used as easy to replay backup logs or for replication. -# note: if you are setting up a replication slave, see README.Debian about -# other settings you may need to change. -#server-id = 1 -#log_bin = /var/log/mysql/mysql-bin.log -expire_logs_days = 10 -max_binlog_size = 100M -#binlog_do_db = include_database_name -#binlog_ignore_db = include_database_name -# -# * InnoDB -# -# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. -# Read the manual for more InnoDB related options. There are many! -# -# * Security Features -# -# Read the manual, too, if you want chroot! -# chroot = /var/lib/mysql/ -# -# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". -# -# ssl-ca=/etc/mysql/cacert.pem -# ssl-cert=/etc/mysql/server-cert.pem -# ssl-key=/etc/mysql/server-key.pem -default-storage-engine = innodb -innodb_file_per_table -collation-server = utf8_general_ci -init-connect = 'SET NAMES utf8' -character-set-server = utf8 - -[mysqldump] -quick -quote-names -max_allowed_packet = 16M - -[mysql] -#no-auto-rehash # faster start of mysql but no tab completition - -[isamchk] -key_buffer = 16M - -# -# * IMPORTANT: Additional settings that can override those from this file! -# The files must end with '.cnf', otherwise they'll be ignored. 
-# -!includedir /etc/mysql/conf.d/ - diff --git a/compass/deploy/ansible/roles/database/tasks/main.yml b/compass/deploy/ansible/roles/database/tasks/main.yml deleted file mode 100644 index e66f0cd..0000000 --- a/compass/deploy/ansible/roles/database/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: copy data.sh - template: src=data.j2 dest=/opt/data.sh mode=777 - tags: - - mysql_user - -- include: mysql.yml - when: HA_CLUSTER is not defined - -- include: mariadb.yml - when: HA_CLUSTER is defined - diff --git a/compass/deploy/ansible/roles/database/tasks/mariadb.yml b/compass/deploy/ansible/roles/database/tasks/mariadb.yml deleted file mode 100644 index 093dfd1..0000000 --- a/compass/deploy/ansible/roles/database/tasks/mariadb.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: install python-mysqldb - apt: name={{ item }} state=present force=yes - with_items: - - libaio1 - - libssl0.9.8 - #- mariadb-client-5.5 - - mysql-client-5.5 - - python-mysqldb - - mysql-server-wsrep - - galera - -- name: create mysql log directy - file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755 - -- name: update mariadb my.cnf - template: src=my.cnf dest=/etc/mysql/my.cnf backup=yes - -- name: update galera wsrep.cnf - template: src=wsrep.cnf dest=/etc/mysql/conf.d/wsrep.cnf backup=yes - -- name: update wsrep_sst_rsync uid - lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*uid = \$MYUID$" backup=yes - -- name: update wsrep_sst_rsync gid - lineinfile: dest=/usr/bin/wsrep_sst_rsync state=absent regexp="\s*gid = \$MYGID$" backup=yes - -- name: manually restart mysql server - service: name=mysql state=restarted enabled=yes - register: result - until: result|success - retries: 5 - delay: 5 - tags: - - mysql_restart - -- name: generate mysql service list - shell: echo {{ item }} >> /opt/service - with_items: - - mysql - -- name: create database/user - shell: /opt/data.sh - when: HA_CLUSTER[inventory_hostname] == '' - tags: - - mysql_user diff --git a/compass/deploy/ansible/roles/database/tasks/mysql.yml b/compass/deploy/ansible/roles/database/tasks/mysql.yml deleted file mode 100644 index 327b656..0000000 --- a/compass/deploy/ansible/roles/database/tasks/mysql.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: install mysql client and server packages - apt: name={{ item }} state=present - with_items: - - python-mysqldb - - mysql-server - -- name: create mysql log directy - file: path=/var/log/mysql state=directory owner=mysql group=mysql mode=0755 - -- name: update mysql my.cnf - copy: src=my.cnf - dest=/etc/mysql/my.cnf - backup=yes - -- name: manually restart mysql server - shell: service mysql restart - -- name: create database/user - shell: /opt/data.sh - tags: - - mysql_user diff --git a/compass/deploy/ansible/roles/database/templates/data.j2 b/compass/deploy/ansible/roles/database/templates/data.j2 deleted file mode 100644 index c894b32..0000000 --- a/compass/deploy/ansible/roles/database/templates/data.j2 +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -mysql -uroot -Dmysql <[:port] of the node. -# The values supplied will be used as defaults for state transfer receiving, -# listening ports and so on. Default: address of the first network interface. -wsrep_node_address={{ hostvars[inventory_hostname]['ansible_' + INTERNAL_INTERFACE].ipv4.address }} - -# Address for incoming client connections. Autodetect by default. 
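The body of data.j2 appears truncated in this patch (the heredoc fed to mysql -uroot -Dmysql is cut off before the SQL), so the exact statements are not recoverable here. As an illustration only: a Juno-era bootstrap of that shape typically emits per-service CREATE DATABASE and GRANT statements, and the service list and grant shape below are assumptions, not recovered template text:

SERVICES = ["keystone", "glance", "nova", "neutron", "cinder"]

def grants_sql(passwords):
    # One CREATE DATABASE plus a local and a remote GRANT per service is
    # the usual pattern piped into `mysql -uroot`.
    lines = []
    for name in SERVICES:
        lines.append("CREATE DATABASE IF NOT EXISTS %s;" % name)
        for host in ("localhost", "%"):
            lines.append(
                "GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%s' "
                "IDENTIFIED BY '%s';" % (name, name, host,
                                         passwords[name]))
    lines.append("FLUSH PRIVILEGES;")
    return "\n".join(lines)

if __name__ == "__main__":
    print(grants_sql({s: s.upper() + "_DBPASS" for s in SERVICES}))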
-#wsrep_node_incoming_address= - -# How many threads will process writesets from other nodes -wsrep_slave_threads=1 - -# DBUG options for wsrep provider -#wsrep_dbug_option - -# Generate fake primary keys for non-PK tables (required for multi-master -# and parallel applying operation) -wsrep_certify_nonPK=1 - -# Maximum number of rows in write set -wsrep_max_ws_rows=131072 - -# Maximum size of write set -wsrep_max_ws_size=1073741824 - -# to enable debug level logging, set this to 1 -wsrep_debug=1 - -# convert locking sessions into transactions -wsrep_convert_LOCK_to_trx=0 - -# how many times to retry deadlocked autocommits -wsrep_retry_autocommit=1 - -# change auto_increment_increment and auto_increment_offset automatically -wsrep_auto_increment_control=1 - -# retry autoinc insert, which failed for duplicate key error -wsrep_drupal_282555_workaround=0 - -# enable "strictly synchronous" semantics for read operations -wsrep_causal_reads=0 - -# Command to call when node status or cluster membership changes. -# Will be passed all or some of the following options: -# --status - new status of this node -# --uuid - UUID of the cluster -# --primary - whether the component is primary or not ("yes"/"no") -# --members - comma-separated list of members -# --index - index of this node in the list -wsrep_notify_cmd= - -## -## WSREP State Transfer options -## - -# State Snapshot Transfer method -wsrep_sst_method=rsync - -# Address on THIS node to receive SST at. DON'T SET IT TO DONOR ADDRESS!!! -# (SST method dependent. Defaults to the first IP of the first interface) -#wsrep_sst_receive_address= - -# SST authentication string. This will be used to send SST to joining nodes. -# Depends on SST method. For mysqldump method it is root: -wsrep_sst_auth={{ WSREP_SST_USER }}:{{ WSREP_SST_PASS }} - -# Desired SST donor name. -#wsrep_sst_donor= - -# Protocol version to use -# wsrep_protocol_version= diff --git a/compass/deploy/ansible/roles/glance/handlers/main.yml b/compass/deploy/ansible/roles/glance/handlers/main.yml deleted file mode 100644 index d8eaa44..0000000 --- a/compass/deploy/ansible/roles/glance/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: restart glance-api - service: name=glance-api state=restarted enabled=yes - -- name: restart glance-registry - service: name=glance-registry state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml b/compass/deploy/ansible/roles/glance/tasks/glance_config.yml deleted file mode 100644 index 28392a3..0000000 --- a/compass/deploy/ansible/roles/glance/tasks/glance_config.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: init glance db version - shell: glance-manage db_version_control 0 - -- name: sync glance db - shell: sleep 15; su -s /bin/sh -c "glance-manage db_sync" glance - register: result - until: result.rc == 0 - retries: 5 - delay: 3 - notify: - - restart glance-registry - - restart glance-api - -- meta: flush_handlers - -- name: place image upload script - template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744 - -- name: download cirros image file - get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }} - -- name: wait for 9292 port to become available - wait_for: host={{ image_host }} port=9292 delay=5 - -- name: run image upload - shell: for i in {0..5}; do /opt/image_upload.sh && touch image_upload_completed; if [ $? 
!= 0 ] ;then sleep 5; else break;fi;done - args: - creates: image_upload_completed diff --git a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml b/compass/deploy/ansible/roles/glance/tasks/glance_install.yml deleted file mode 100644 index 505b3b0..0000000 --- a/compass/deploy/ansible/roles/glance/tasks/glance_install.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: install glance packages - apt: name={{ item }} state=latest force=yes - with_items: - - glance - - python-glanceclient - -- name: generate glance service list - shell: echo {{ item }} >> /opt/service - with_items: - - glance-registry - - glance-api - -- name: update glance conf - template: src={{ item }} dest=/etc/glance/{{ item }} - backup=yes - with_items: - - glance-api.conf - - glance-registry.conf - notify: - - restart glance-registry - - restart glance-api - -- name: remove default sqlite db - shell: rm /var/lib/glance/glance.sqlite || touch glance.sqllite.db.removed - diff --git a/compass/deploy/ansible/roles/glance/tasks/main.yml b/compass/deploy/ansible/roles/glance/tasks/main.yml deleted file mode 100644 index 296f0dc..0000000 --- a/compass/deploy/ansible/roles/glance/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: glance_install.yml - tags: - - install - - glance_install - - glance - -- include: nfs.yml - tags: - - nfs - -- include: glance_config.yml - when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' - tags: - - config - - glance_config - - glance - diff --git a/compass/deploy/ansible/roles/glance/tasks/nfs.yml b/compass/deploy/ansible/roles/glance/tasks/nfs.yml deleted file mode 100644 index c03ab4d..0000000 --- a/compass/deploy/ansible/roles/glance/tasks/nfs.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- name: get nfs server - local_action: shell /sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6| grep "10" -m 1 |awk '{print $2}'|tr -d "addr:" - register: ip_info - run_once: True - -- name: install nfs - local_action: yum name=nfs-utils state=present - run_once: True - -- name: create image directory - local_action: file path=/opt/images state=directory mode=0777 - run_once: True - -- name: update nfs config - local_action: lineinfile dest=/etc/exports state=present - regexp="/opt/images *(rw,insecure,sync,all_squash)" - line="/opt/images *(rw,insecure,sync,all_squash)" - run_once: True - -- name: restart nfs service - local_action: service name=nfs state=restarted enabled=yes - run_once: True - -- name: install nfs comm - apt: name=nfs-common state=present - -- name: get mount info - command: mount - register: mount_info - -- name: mount image directory - shell: | - mount -t nfs -onfsvers=3 {{ item }}:/opt/images /var/lib/glance/images - sed -i '/\/var\/lib\/glance\/images/d' /etc/fstab - echo {{ item }}:/opt/images /var/lib/glance/images/ nfs nfsvers=3 >> /etc/fstab - when: mount_info.stdout.find('images') == -1 - with_items: - ip_info.stdout_lines - retries: 5 - delay: 3 diff --git a/compass/deploy/ansible/roles/glance/templates/glance-api.conf b/compass/deploy/ansible/roles/glance/templates/glance-api.conf deleted file mode 100644 index 763539e..0000000 --- a/compass/deploy/ansible/roles/glance/templates/glance-api.conf +++ /dev/null @@ -1,677 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Which backend scheme should Glance use by default is not specified -# in a request to add a new image to Glance? 
Known schemes are determined -# by the known_stores option below. -# Default: 'file' -# "default_store" option has been moved to [glance_store] section in -# Juno release - -# List of which store classes and store class locations are -# currently known to glance at startup. -# Existing but disabled stores: -# glance.store.rbd.Store, -# glance.store.s3.Store, -# glance.store.swift.Store, -# glance.store.sheepdog.Store, -# glance.store.cinder.Store, -# glance.store.gridfs.Store, -# glance.store.vmware_datastore.Store, -#known_stores = glance.store.filesystem.Store, -# glance.store.http.Store - - -# Maximum image size (in bytes) that may be uploaded through the -# Glance API server. Defaults to 1 TB. -# WARNING: this value should only be increased after careful consideration -# and must be set to a value under 8 EB (9223372036854775808). -#image_size_cap = 1099511627776 - -# Address to bind the API server -bind_host = {{ image_host }} - -# Port the bind the API server to -bind_port = 9292 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/api.log - -# Backlog requests when creating socket -backlog = 4096 - -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. Default value points to sqlalchemy -# package, it is also possible to use: glance.db.registry.api -# data_api = glance.db.sqlalchemy.api - -# Number of Glance API worker processes to start. -# On machines with more than one CPU increasing this value -# may improve performance (especially if using SSL with -# compression turned on). It is typically recommended to set -# this value to the number of CPUs present on your machine. -workers = 1 - -# Maximum line size of message headers to be accepted. -# max_header_line may need to be increased when using large tokens -# (typically those generated by the Keystone v3 API with big service -# catalogs) -# max_header_line = 16384 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Allow unauthenticated users to access the API with read-only -# privileges. This only applies when using ContextMiddleware. -#allow_anonymous_access = False - -# Allow access to version 1 of glance api -#enable_v1_api = True - -# Allow access to version 2 of glance api -#enable_v2_api = True - -# Return the URL that references where the data is stored on -# the backend storage system. For example, if using the -# file system store a URL of 'file:///path/to/image' will -# be returned to the user in the 'direct_url' meta-data field. -# The default value is false. -#show_image_direct_url = False - -# Send headers containing user and tenant information when making requests to -# the v1 glance registry. This allows the registry to function as if a user is -# authenticated without the need to authenticate a user itself using the -# auth_token middleware. -# The default value is false. -#send_identity_headers = False - -# Supported values for the 'container_format' image attribute -#container_formats=ami,ari,aki,bare,ovf,ova - -# Supported values for the 'disk_format' image attribute -#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso - -# Directory to use for lock files. Default to a temp directory -# (string value). This setting needs to be the same for both -# glance-scrubber and glance-api. 
-#lock_path= - -# Property Protections config file -# This file contains the rules for property protections and the roles/policies -# associated with it. -# If this config value is not specified, by default, property protections -# won't be enforced. -# If a value is specified and the file is not found, then the glance-api -# service will not start. -#property_protection_file = - -# Specify whether 'roles' or 'policies' are used in the -# property_protection_file. -# The default value for property_protection_rule_format is 'roles'. -#property_protection_rule_format = roles - -# Specifies how long (in hours) a task is supposed to live in the tasks DB -# after succeeding or failing before getting soft-deleted. -# The default value for task_time_to_live is 48 hours. -# task_time_to_live = 48 - -# This value sets what strategy will be used to determine the image location -# order. Currently two strategies are packaged with Glance 'location_order' -# and 'store_type'. -#location_strategy = location_order - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -#use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. -#syslog_log_facility = LOG_LOCAL0 - -# ================= SSL Options =============================== - -# Certificate file to use when starting API server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ================= Security Options ========================== - -# AES key for encrypting store 'location' metadata, including -# -- if used -- Swift or S3 credentials -# Should be set to a random string of length 16, 24 or 32 bytes -#metadata_encryption_key = <16, 24 or 32 char registry metadata key> - -# ============ Registry Options =============================== - -# Address to find the registry server -registry_host = {{ internal_ip }} - -# Port the registry server is listening on -registry_port = 9191 - -# What protocol to use when connecting to the registry server? -# Set to https for secure HTTP communication -registry_client_protocol = http - -# The path to the key file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file -#registry_client_key_file = /path/to/key/file - -# The path to the cert file to use in SSL connections to the -# registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file -#registry_client_cert_file = /path/to/cert/file - -# The path to the certifying authority cert file to use in SSL connections -# to the registry server, if any. Alternately, you may set the -# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file -#registry_client_ca_file = /path/to/ca/file - -# When using SSL in connections to the registry server, do not require -# validation via a certifying authority. This is the registry's equivalent of -# specifying --insecure on the command line using glanceclient for the API -# Default: False -#registry_client_insecure = False - -# The period of time, in seconds, that the API server will wait for a registry -# request to complete. A value of '0' implies no timeout. 
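The two glance daemons talk to each other over plain TCP (registry on 9191, API on 9292), and the glance_config tasks earlier wait on port 9292 before running the image upload. A stdlib equivalent of that wait_for step, sketched with sockets; the host, port and timing defaults are examples:

import socket
import time

def wait_for_port(host, port, timeout=60.0, delay=5.0):
    # Poll until a TCP connect succeeds, mirroring
    # `wait_for: host=... port=9292 delay=5` from the glance role.
    deadline = time.time() + timeout
    time.sleep(delay)
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=2.0):
                return True
        except OSError:
            time.sleep(1.0)
    return False

if __name__ == "__main__":
    print(wait_for_port("127.0.0.1", 9292, timeout=10.0))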
-# Default: 600 -#registry_client_timeout = 600 - -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False - -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True - -# Pass the user's token through for API requests to the registry. -# Default: True -#use_user_token = True - -# If 'use_user_token' is not in effect then admin credentials -# can be specified. Requests to the registry on behalf of -# the API will use these credentials. -# Admin user name -#admin_user = None -# Admin password -#admin_password = None -# Admin tenant name -#admin_tenant_name = None -# Keystone endpoint -#auth_url = None -# Keystone region -#auth_region = None -# Auth strategy -#auth_strategy = keystone - -# ============ Notification System Options ===================== - -# Notifications can be sent when images are create, updated or deleted. -# There are three methods of sending notifications, logging (via the -# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid -# message queue), or noop (no notifications sent, the default) -# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver` -# notifier_strategy = default - -# Driver or drivers to handle sending notifications -# notification_driver = noop - -# Default publisher_id for outgoing notifications. -# default_publisher_id = image.localhost - -# Configuration options if sending notifications via rabbitmq (these are -# the defaults) -rabbit_host = localhost -rabbit_port = 5672 -rabbit_use_ssl = false -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} -rabbit_virtual_host = / -rabbit_notification_exchange = glance -rabbit_notification_topic = notifications -rabbit_durable_queues = False - -# Configuration options if sending notifications via Qpid (these are -# the defaults) -qpid_notification_exchange = glance -qpid_notification_topic = notifications -qpid_hostname = localhost -qpid_port = 5672 -qpid_username = -qpid_password = -qpid_sasl_mechanisms = -qpid_reconnect_timeout = 0 -qpid_reconnect_limit = 0 -qpid_reconnect_interval_min = 0 -qpid_reconnect_interval_max = 0 -qpid_reconnect_interval = 0 -qpid_heartbeat = 5 -# Set to 'ssl' to enable SSL -qpid_protocol = tcp -qpid_tcp_nodelay = True - -# ============ Filesystem Store Options ======================== - -# Directory that the Filesystem backend store -# writes image data to -# this option has been moved to [glance_store] for Juno release -# filesystem_store_datadir = /var/lib/glance/images/ - -# A list of directories where image data can be stored. -# This option may be specified multiple times for specifying multiple store -# directories. Either one of filesystem_store_datadirs or -# filesystem_store_datadir option is required. A priority number may be given -# after each directory entry, separated by a ":". -# When adding an image, the highest priority directory will be selected, unless -# there is not enough space available in cases where the image size is already -# known. If no priority is given, it is assumed to be zero and the directory -# will be considered for selection last. If multiple directories have the same -# priority, then the one with the most free space available is selected. -# If same store is specified multiple times then BadStoreConfiguration -# exception will be raised. 
-#filesystem_store_datadirs = /var/lib/glance/images/:1 - -# A path to a JSON file that contains metadata describing the storage -# system. When show_multiple_locations is True the information in this -# file will be returned with any location that is contained in this -# store. -#filesystem_store_metadata_file = None - -# ============ Swift Store Options ============================= - -# Version of the authentication service to use -# Valid versions are '2' for keystone and '1' for swauth and rackspace -swift_store_auth_version = 2 - -# Address where the Swift authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'https://' -# For swauth, use something like '127.0.0.1:8080/v1.0/' -swift_store_auth_address = 127.0.0.1:5000/v2.0/ - -# User to authenticate against the Swift authentication service -# If you use Swift authentication service, set it to 'account':'user' -# where 'account' is a Swift storage account and 'user' -# is a user in that account -swift_store_user = jdoe:jdoe - -# Auth key for the user authenticating against the -# Swift authentication service -swift_store_key = a86850deb2742ec3cb41518e26aa2d89 - -# Container within the account that the account should use -# for storing images in Swift -swift_store_container = glance - -# Do we create the container if it does not exist? -swift_store_create_container_on_put = False - -# What size, in MB, should Glance start chunking image files -# and do a large object manifest in Swift? By default, this is -# the maximum object size in Swift, which is 5GB -swift_store_large_object_size = 5120 - -# When doing a large object manifest, what size, in MB, should -# Glance write chunks to Swift? This amount of data is written -# to a temporary disk buffer during the process of chunking -# the image file, and the default is 200MB -swift_store_large_object_chunk_size = 200 - -# Whether to use ServiceNET to communicate with the Swift storage servers. -# (If you aren't RACKSPACE, leave this False!) -# -# To use ServiceNET for authentication, prefix hostname of -# `swift_store_auth_address` with 'snet-'. -# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/ -swift_enable_snet = False - -# If set to True enables multi-tenant storage mode which causes Glance images -# to be stored in tenant specific Swift accounts. -#swift_store_multi_tenant = False - -# A list of swift ACL strings that will be applied as both read and -# write ACLs to the containers created by Glance in multi-tenant -# mode. This grants the specified tenants/users read and write access -# to all newly created image objects. The standard swift ACL string -# formats are allowed, including: -# : -# : -# *: -# Multiple ACLs can be combined using a comma separated list, for -# example: swift_store_admin_tenants = service:glance,*:admin -#swift_store_admin_tenants = - -# The region of the swift endpoint to be used for single tenant. This setting -# is only necessary if the tenant has multiple swift endpoints. -#swift_store_region = - -# If set to False, disables SSL layer compression of https swift requests. -# Setting to 'False' may improve performance for images which are already -# in a compressed format, eg qcow2. If set to True, enables SSL layer -# compression (provided it is supported by the target swift proxy). 
-#swift_store_ssl_compression = True - -# The number of times a Swift download will be retried before the -# request fails -#swift_store_retry_get_count = 0 - -# ============ S3 Store Options ============================= - -# Address where the S3 authentication service lives -# Valid schemes are 'http://' and 'https://' -# If no scheme specified, default to 'http://' -s3_store_host = 127.0.0.1:8080/v1.0/ - -# User to authenticate against the S3 authentication service -s3_store_access_key = <20-char AWS access key> - -# Auth key for the user authenticating against the -# S3 authentication service -s3_store_secret_key = <40-char AWS secret key> - -# Container within the account that the account should use -# for storing images in S3. Note that S3 has a flat namespace, -# so you need a unique bucket name for your glance images. An -# easy way to do this is append your AWS access key to "glance". -# S3 buckets in AWS *must* be lowercased, so remember to lowercase -# your AWS access key if you use it in your bucket name below! -s3_store_bucket = glance - -# Do we create the bucket if it does not exist? -s3_store_create_bucket_on_put = False - -# When sending images to S3, the data will first be written to a -# temporary buffer on disk. By default the platform's temporary directory -# will be used. If required, an alternative directory can be specified here. -#s3_store_object_buffer_dir = /path/to/dir - -# When forming a bucket url, boto will either set the bucket name as the -# subdomain or as the first token of the path. Amazon's S3 service will -# accept it as the subdomain, but Swift's S3 middleware requires it be -# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. -#s3_store_bucket_url_format = subdomain - -# ============ RBD Store Options ============================= - -# Ceph configuration file path -# If using cephx authentication, this file should -# include a reference to the right keyring -# in a client. section -#rbd_store_ceph_conf = /etc/ceph/ceph.conf - -# RADOS user to authenticate as (only applicable if using cephx) -# If , a default will be chosen based on the client. section -# in rbd_store_ceph_conf -#rbd_store_user = - -# RADOS pool in which images are stored -#rbd_store_pool = images - -# RADOS images will be chunked into objects of this size (in megabytes). -# For best performance, this should be a power of two -#rbd_store_chunk_size = 8 - -# ============ Sheepdog Store Options ============================= - -sheepdog_store_address = localhost - -sheepdog_store_port = 7000 - -# Images will be chunked into objects of this size (in megabytes). -# For best performance, this should be a power of two -sheepdog_store_chunk_size = 64 - -# ============ Cinder Store Options =============================== - -# Info to match when looking for cinder in the service catalog -# Format is : separated values of the form: -# :: (string value) -#cinder_catalog_info = volume:cinder:publicURL - -# Override service catalog lookup with template for cinder endpoint -# e.g. 
http://localhost:8776/v1/%(project_id)s (string value) -#cinder_endpoint_template = - -# Region name of this node (string value) -#os_region_name = - -# Location of ca certicates file to use for cinder client requests -# (string value) -#cinder_ca_certificates_file = - -# Number of cinderclient retries on failed http calls (integer value) -#cinder_http_retries = 3 - -# Allow to perform insecure SSL requests to cinder (boolean value) -#cinder_api_insecure = False - -# ============ VMware Datastore Store Options ===================== - -# ESX/ESXi or vCenter Server target system. -# The server value can be an IP address or a DNS name -# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com -#vmware_server_host = - -# Server username (string value) -#vmware_server_username = - -# Server password (string value) -#vmware_server_password = - -# Inventory path to a datacenter (string value) -# Value optional when vmware_server_ip is an ESX/ESXi host: if specified -# should be `ha-datacenter`. -#vmware_datacenter_path = - -# Datastore associated with the datacenter (string value) -#vmware_datastore_name = - -# The number of times we retry on failures -# e.g., socket error, etc (integer value) -#vmware_api_retry_count = 10 - -# The interval used for polling remote tasks -# invoked on VMware ESX/VC server in seconds (integer value) -#vmware_task_poll_interval = 5 - -# Absolute path of the folder containing the images in the datastore -# (string value) -#vmware_store_image_dir = /openstack_glance - -# Allow to perform insecure SSL requests to the target system (boolean value) -#vmware_api_insecure = False - -# ============ Delayed Delete Options ============================= - -# Turn on/off delayed delete -delayed_delete = False - -# Delayed delete time in seconds -scrub_time = 43200 - -# Directory that the scrubber will use to remind itself of what to delete -# Make sure this is also set in glance-scrubber.conf -scrubber_datadir = /var/lib/glance/scrubber - -# =============== Quota Options ================================== - -# The maximum number of image members allowed per image -#image_member_quota = 128 - -# The maximum number of image properties allowed per image -#image_property_quota = 128 - -# The maximum number of tags allowed per image -#image_tag_quota = 128 - -# The maximum number of locations allowed per image -#image_location_quota = 10 - -# Set a system wide quota for every user. This value is the total number -# of bytes that a user can use across all storage systems. A value of -# 0 means unlimited. -#user_storage_quota = 0 - -# =============== Image Cache Options ============================= - -# Base directory that the Image Cache uses -image_cache_dir = /var/lib/glance/image-cache/ - -# =============== Manager Options ================================= - -# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE. -# Whether or not to enforce that all DB tables have charset utf8. -# If your database tables do not have charset utf8 you will -# need to convert before this option is removed. This option is -# only relevant if your database engine is MySQL. 
-#db_enforce_mysql_charset = True - -# =============== Glance Store ==================================== -[glance_store] -# Moved from [DEFAULT], for Juno release -default_store = file -filesystem_store_datadir = /var/lib/glance/images/ - -# =============== Database Options ================================= - -[database] -# The file name to use with SQLite (string value) -sqlite_db = /var/lib/glance/glance.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -backend = sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = glance -admin_password = {{ GLANCE_PASS }} - -[paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-api-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-api-keystone], you would configure the flavor below -# as 'keystone'. -flavor = keystone - -[store_type_location_strategy] -# The scheme list to use to get store preference order. The scheme must be -# registered by one of the stores defined by the 'known_stores' config option. -# This option will be applied when you are using the 'store_type' option as image -# location strategy defined by the 'location_strategy' config option. -#store_type_preference = diff --git a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf b/compass/deploy/ansible/roles/glance/templates/glance-registry.conf deleted file mode 100644 index 8d731a2..0000000 --- a/compass/deploy/ansible/roles/glance/templates/glance-registry.conf +++ /dev/null @@ -1,190 +0,0 @@ -[DEFAULT] -# Show more verbose log output (sets INFO log level output) -#verbose = False - -# Show debugging output in logs (sets DEBUG log level output) -#debug = False - -# Address to bind the registry server -bind_host = {{ internal_ip }} - -# Port to bind the registry server to -bind_port = 9191 - -# Log to this file. Make sure you do not set the same log file for both the API -# and registry servers! -# -# If `log_file` is omitted and `use_syslog` is false, then log messages are -# sent to stdout as a fallback. -log_file = /var/log/glance/registry.log - -# Backlog requests when creating socket -backlog = 4096 - -# TCP_KEEPIDLE value in seconds when creating socket. -# Not supported on OS X. -#tcp_keepidle = 600 - -# API to use for accessing data. Default value points to sqlalchemy -# package. -#data_api = glance.db.sqlalchemy.api - -# Enable Registry API versions individually or simultaneously -#enable_v1_registry = True -#enable_v2_registry = True - -# Limit the api to return `param_limit_max` items in a call to a container. If -# a larger `limit` query param is provided, it will be reduced to this value.
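api_limit_max and limit_param_default (their values follow just below) combine into the usual clamp rule: a missing ?limit= falls back to the default, and anything larger than the maximum is reduced to it. A sketch of the effective behavior, illustrative rather than the registry's actual code:

    API_LIMIT_MAX = 1000      # api_limit_max, set just below
    LIMIT_PARAM_DEFAULT = 25  # limit_param_default, set just below

    def effective_limit(requested=None):
        # Fall back to the default page size when no ?limit= is given,
        # then clamp to the server-side maximum.
        limit = LIMIT_PARAM_DEFAULT if requested is None else requested
        return min(limit, API_LIMIT_MAX)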
-api_limit_max = 1000 - -# If a `limit` query param is not provided in an api request, it will -# default to `limit_param_default` -limit_param_default = 25 - -# Role used to identify an authenticated user as administrator -#admin_role = admin - -# Whether to automatically create the database tables. -# Default: False -#db_auto_create = False - -# Enable DEBUG log messages from sqlalchemy which prints every database -# query and response. -# Default: False -#sqlalchemy_debug = True - -# ================= Syslog Options ============================ - -# Send logs to syslog (/dev/log) instead of to file specified -# by `log_file` -#use_syslog = False - -# Facility to use. If unset defaults to LOG_USER. -#syslog_log_facility = LOG_LOCAL1 - -# ================= SSL Options =============================== - -# Certificate file to use when starting registry server securely -#cert_file = /path/to/certfile - -# Private key file to use when starting registry server securely -#key_file = /path/to/keyfile - -# CA certificate file to use to verify connecting clients -#ca_file = /path/to/cafile - -# ================= Database Options ========================== - -[database] -# The file name to use with SQLite (string value) -sqlite_db = /var/lib/glance/glance.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous = True - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -backend = sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection = -connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode = TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout = 3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size = - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries = 10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval = 10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow = - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect = False - -# seconds between db connection retries (integer value) -#db_retry_interval = 1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval = True - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval = 10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries = 20 - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = glance -admin_password = {{ GLANCE_PASS }} - -[paste_deploy] -# Name of the paste configuration file that defines the available pipelines -#config_file = glance-registry-paste.ini - -# Partial name of a pipeline in your paste configuration file with the -# service name removed. For example, if your paste section name is -# [pipeline:glance-registry-keystone], you would configure the flavor below -# as 'keystone'. -flavor = keystone diff --git a/compass/deploy/ansible/roles/glance/templates/image_upload.sh b/compass/deploy/ansible/roles/glance/templates/image_upload.sh deleted file mode 100644 index 9dd1fa8..0000000 --- a/compass/deploy/ansible/roles/glance/templates/image_upload.sh +++ /dev/null @@ -1,2 +0,0 @@ -sleep 10 -glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed diff --git a/compass/deploy/ansible/roles/ha/files/galera_chk b/compass/deploy/ansible/roles/ha/files/galera_chk deleted file mode 100644 index 9fd165c..0000000 --- a/compass/deploy/ansible/roles/ha/files/galera_chk +++ /dev/null @@ -1,10 +0,0 @@ -#! /bin/sh - -code=`mysql -uroot -e "show status" | awk '/Threads_running/{print $2}'` - -if [ "$code" = "1" ] -then - echo "HTTP/1.1 200 OK\r\n" -else - echo "HTTP/1.1 503 Service Unavailable\r\n" -fi diff --git a/compass/deploy/ansible/roles/ha/files/mysqlchk b/compass/deploy/ansible/roles/ha/files/mysqlchk deleted file mode 100644 index 2c03f19..0000000 --- a/compass/deploy/ansible/roles/ha/files/mysqlchk +++ /dev/null @@ -1,15 +0,0 @@ -# default: off -# description: An xinetd internal service which echoes characters back to -# clients. -# This is the tcp version.
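galera_chk and the xinetd wrapper defined next form a makeshift HTTP health endpoint: HAProxy's `option httpchk` probes TCP port 9200, xinetd runs galera_chk, and the single HTTP status line tells HAProxy whether the Galera node is usable. A client-side sketch of such a probe (the host address is a placeholder; this only mirrors what HAProxy's check does):

    import socket

    def mysqlchk_probe(host, port=9200, timeout=2.0):
        # Connect to the xinetd-managed check port and read the one-line
        # HTTP response emitted by galera_chk ("200 OK" or "503 ...").
        with socket.create_connection((host, port), timeout=timeout) as s:
            data = s.recv(1024).decode("ascii", "replace")
        return "200 OK" in data

    # e.g. mysqlchk_probe("10.1.0.51") -> True while the node is healthy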
service mysqlchk -{ - disable = no - flags = REUSE - socket_type = stream - protocol = tcp - user = root - wait = no - server = /usr/local/bin/galera_chk - port = 9200 -} diff --git a/compass/deploy/ansible/roles/ha/files/notify.sh b/compass/deploy/ansible/roles/ha/files/notify.sh deleted file mode 100644 index 5edffe8..0000000 --- a/compass/deploy/ansible/roles/ha/files/notify.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -python /usr/local/bin/failover.py $1 -mysql -uroot -e"flush hosts" -service mysql restart diff --git a/compass/deploy/ansible/roles/ha/handlers/main.yml b/compass/deploy/ansible/roles/ha/handlers/main.yml deleted file mode 100644 index a02c686..0000000 --- a/compass/deploy/ansible/roles/ha/handlers/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: restart haproxy - service: name=haproxy state=restarted enabled=yes - -- name: restart xinetd - service: name=xinetd state=restarted enabled=yes - -- name: restart keepalived - service: name=keepalived state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/ha/tasks/main.yml b/compass/deploy/ansible/roles/ha/tasks/main.yml deleted file mode 100644 index a00c21a..0000000 --- a/compass/deploy/ansible/roles/ha/tasks/main.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- -- name: install keepalived xinetd haproxy - apt: name={{ item }} state=present - with_items: - - keepalived - - xinetd - - haproxy - -- name: generate ha service list - shell: echo {{ item }} >> /opt/service - with_items: - - keepalived - - xinetd - - haproxy - -- name: install pexpect - pip: name=pexpect state=present - -- name: activate ip_nonlocal_bind - sysctl: name=net.ipv4.ip_nonlocal_bind value=1 - state=present reload=yes - -- name: set net.ipv4.tcp_keepalive_intvl - sysctl: name=net.ipv4.tcp_keepalive_intvl value=1 - state=present reload=yes - -- name: set net.ipv4.tcp_keepalive_probes - sysctl: name=net.ipv4.tcp_keepalive_probes value=5 - state=present reload=yes - -- name: set net.ipv4.tcp_keepalive_time - sysctl: name=net.ipv4.tcp_keepalive_time value=5 - state=present reload=yes - -- name: update haproxy cfg - template: src=haproxy.cfg dest=/etc/haproxy/haproxy.cfg - notify: restart haproxy - -- name: set haproxy enable flag - lineinfile: dest=/etc/default/haproxy state=present - regexp="ENABLED=*" - line="ENABLED=1" - notify: restart haproxy - -- name: set haproxy log - lineinfile: dest=/etc/rsyslog.conf state=present - regexp="local0.* /var/log/haproxy.log" - line="local0.* /var/log/haproxy.log" - -- name: set rsyslog udp module - lineinfile: dest=/etc/rsyslog.conf state=present - regexp="^#$ModLoad imudp" - line="$ModLoad imudp" - -- name: set rsyslog udp port - lineinfile: dest=/etc/rsyslog.conf state=present - regexp="^#$UDPServerRun 514" - line="$UDPServerRun 514" - -- name: copy galera_chk file - copy: src=galera_chk dest=/usr/local/bin/galera_chk mode=0777 - -- name: copy notify file - copy: src=notify.sh dest=/usr/local/bin/notify.sh mode=0777 - -- name: copy notify template file - template: src=failover.j2 dest=/usr/local/bin/failover.py mode=0777 - -- name: add network service - lineinfile: dest=/etc/services state=present - line="mysqlchk 9200/tcp" - insertafter="Local services" - notify: restart xinetd - -- name: copy mysqlchk file - copy: src=mysqlchk dest=/etc/xinetd.d/mysqlchk mode=0777 - notify: restart xinetd - -- name: set keepalived start param - lineinfile: dest=/etc/default/keepalived state=present - regexp="^DAEMON_ARGS=*" - line="DAEMON_ARGS=\"-D -d -S 1\"" - -- name: set keepalived log - lineinfile:
dest=/etc/rsyslog.conf state=present - regexp="local1.* /var/log/keepalived.log" - line="local1.* /var/log/keepalived.log" - -- name: update keepalived info - template: src=keepalived.conf dest=/etc/keepalived/keepalived.conf - notify: restart keepalived - -- name: restart rsyslog - shell: service rsyslog restart diff --git a/compass/deploy/ansible/roles/ha/templates/failover.j2 b/compass/deploy/ansible/roles/ha/templates/failover.j2 deleted file mode 100644 index b03c737..0000000 --- a/compass/deploy/ansible/roles/ha/templates/failover.j2 +++ /dev/null @@ -1,65 +0,0 @@ -import ConfigParser, os, socket -import logging as LOG -import pxssh -import sys -import re - -LOG_FILE="/var/log/mysql_failover" -try: - os.remove(LOG_FILE) -except: - pass - -LOG.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=LOG_FILE,level=LOG.DEBUG) -ha_vip = "{{ HA_VIP }}" -LOG.info("ha_vip: %s" % ha_vip) - -#ha_vip = "10.1.0.50" -galera_path = '/etc/mysql/conf.d/wsrep.cnf' -pattern = re.compile(r"gcomm://(?P<prev_ip>.*)") - -def ssh_get_hostname(ip): - try: - s = pxssh.pxssh() - s.login("%s" % ip, "root", "root") - s.sendline('hostname') # run a command - s.prompt() # match the prompt - result = s.before.strip() # print everything before the prompt. - return result.split(os.linesep)[1] - except pxssh.ExceptionPxssh as e: - LOG.error("pxssh failed on login.") - raise - -def failover(mode): - config = ConfigParser.ConfigParser() - config.optionxform = str - config.readfp(open(galera_path)) - wsrep_cluster_address = config.get("mysqld", "wsrep_cluster_address") - wsrep_cluster_address = pattern.match(wsrep_cluster_address).groupdict()["prev_ip"] - - LOG.info("old wsrep_cluster_address = %s" % wsrep_cluster_address) - - if mode == "master": - # refresh wsrep_cluster_address to null - LOG.info("I'm being master, set wsrep_cluster_address to null") - wsrep_cluster_address = "" - - elif mode == "backup": - # refresh wsrep_cluster_address to master int ip - hostname = ssh_get_hostname(ha_vip) - wsrep_cluster_address = socket.gethostbyname(hostname) - LOG.info("I'm being slave, set wsrep_cluster_address to master internal ip") - - LOG.info("new wsrep_cluster_address = %s" % wsrep_cluster_address) - wsrep_cluster_address = "gcomm://%s" % wsrep_cluster_address - config.set("mysqld", "wsrep_cluster_address", wsrep_cluster_address) - with open(galera_path, 'wb') as fp: - #config.write(sys.stdout) - config.write(fp) - - os.system("service mysql restart") - LOG.info("failover success!!!") - -if __name__ == "__main__": - LOG.debug("call me: %s" % sys.argv) - failover(sys.argv[1]) diff --git a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg b/compass/deploy/ansible/roles/ha/templates/haproxy.cfg deleted file mode 100644 index 4ed528a..0000000 --- a/compass/deploy/ansible/roles/ha/templates/haproxy.cfg +++ /dev/null @@ -1,133 +0,0 @@ - -global - #chroot /var/run/haproxy - daemon - user haproxy - group haproxy - maxconn 4000 - pidfile /var/run/haproxy/haproxy.pid - #log 127.0.0.1 local0 - tune.bufsize 1000000 - stats socket /var/run/haproxy.sock - stats timeout 2m - -defaults - log global - maxconn 8000 - option redispatch - option dontlognull - option splice-auto - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 6m - timeout server 6m - timeout check 10s - retries 5 - -listen proxy-glance_registry_cluster - bind {{ HA_VIP }}:9191 - option tcpka - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' +
INTERNAL_INTERFACE].ipv4.address }}:9191 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-glance_api_cluster - bind {{ HA_VIP }}:9292 - option tcpka - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9292 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-nova-novncproxy - bind {{ HA_VIP }}:6080 - option tcpka - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:6080 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-network - bind {{ HA_VIP }}:9696 - option tcpka - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:9696 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-volume - bind {{ HA_VIP }}:8776 - option tcpka - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-keystone_admin_cluster - bind {{ HA_VIP }}:35357 - option tcpka - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:35357 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-keystone_public_internal_cluster - bind {{ HA_VIP }}:5000 - option tcpka - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:5000 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-nova_compute_api_cluster - bind {{ HA_VIP }}:8774 - mode tcp - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8774 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-nova_metadata_api_cluster - bind {{ HA_VIP }}:8775 - option tcpka - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8775 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen proxy-cinder_api_cluster - bind {{ HA_VIP }}:8776 - mode tcp - option httpchk - option tcplog - balance source -{% for host in groups['controller'] %} - server {{ host }} {{ hostvars[host]['ansible_' + INTERNAL_INTERFACE].ipv4.address }}:8776 weight 1 check inter 2000 rise 2 fall 5 -{% endfor %} - -listen stats - mode http - bind 0.0.0.0:8888 - stats enable - stats refresh 30s - stats uri / - stats realm Global\ statistics - stats auth admin:admin - - diff --git a/compass/deploy/ansible/roles/ha/templates/keepalived.conf b/compass/deploy/ansible/roles/ha/templates/keepalived.conf deleted file mode 100644 index 0b49137..0000000 --- a/compass/deploy/ansible/roles/ha/templates/keepalived.conf +++ /dev/null @@ -1,42 +0,0 @@ -global_defs { - - notification_email{ - root@huawei.com - } - - notification_email_from keepalived@huawei.com - - smtp_server localhost - - smtp_connect_timeout 30 - - router_id NodeA - -} - -vrrp_instance VI_1 { - - interface {{ INTERNAL_INTERFACE }} - virtual_router_id 51 - 
state BACKUP - nopreempt - advert_int 1 -{% for host in groups['controller'] %} -{% if host == inventory_hostname %} - priority {{ 100 - loop.index0 * 5 }} -{% endif %} -{% endfor %} - - authentication { - auth_type PASS - auth_pass 1111 - } - - virtual_ipaddress { - {{ HA_VIP }} dev {{ INTERNAL_INTERFACE }} - } - - notify_master "/usr/local/bin/notify.sh master" - notify_backup "/usr/local/bin/notify.sh backup" -} - diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml deleted file mode 100644 index 3203b26..0000000 --- a/compass/deploy/ansible/roles/keystone/tasks/keystone_config.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: keystone-manage db-sync - shell: su -s /bin/sh -c "keystone-manage db_sync" - register: result - until: result.rc == 0 - retries: 5 - delay: 3 - -- name: place keystone init script under /opt/ - template: src=keystone_init dest=/opt/keystone_init mode=0744 - -- name: run keystone_init - shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed - args: - creates: keystone_init_complete - diff --git a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml b/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml deleted file mode 100644 index e69c069..0000000 --- a/compass/deploy/ansible/roles/keystone/tasks/keystone_install.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: install keystone packages - apt: name=keystone state=present force=yes - -- name: generate keystone service list - shell: echo {{ item }} >> /opt/service - with_items: - - keystone - -- name: update keystone conf - template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes - -- name: delete sqlite database - shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed - -- name: cron job to purge expired tokens hourly - shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone - -- name: modify keystone cron rights - file: path=/var/spool/cron/crontabs/keystone mode=0600 - -- name: keystone source files - template: src={{ item }} dest=/opt/{{ item }} - with_items: - - admin-openrc.sh - - demo-openrc.sh - -- name: manually start keystone - service: name=keystone state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/keystone/tasks/main.yml b/compass/deploy/ansible/roles/keystone/tasks/main.yml deleted file mode 100644 index 2f36e91..0000000 --- a/compass/deploy/ansible/roles/keystone/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- include: keystone_install.yml - tags: - - install - - keystone_install - - keystone - -- include: keystone_config.yml - when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' - tags: - - config - - keystone_config - - keystone diff --git a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh b/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh deleted file mode 100644 index f2e0d61..0000000 --- a/compass/deploy/ansible/roles/keystone/templates/admin-openrc.sh +++ /dev/null @@ -1,6 +0,0 @@ -# Verify the Identity Service installation -export OS_PASSWORD={{ ADMIN_PASS }} -export OS_TENANT_NAME=admin -export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0 -export OS_USERNAME=admin - diff --git a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh
b/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh deleted file mode 100644 index 8bdc51b..0000000 --- a/compass/deploy/ansible/roles/keystone/templates/demo-openrc.sh +++ /dev/null @@ -1,5 +0,0 @@ -export OS_USERNAME=demo -export OS_PASSWORD={{ DEMO_PASS }} -export OS_TENANT_NAME=demo -export OS_AUTH_URL=http://{{ HA_VIP }}:35357/v2.0 - diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone.conf b/compass/deploy/ansible/roles/keystone/templates/keystone.conf deleted file mode 100644 index fc8bf1f..0000000 --- a/compass/deploy/ansible/roles/keystone/templates/keystone.conf +++ /dev/null @@ -1,1317 +0,0 @@ -[DEFAULT] - -admin_token={{ ADMIN_TOKEN }} - -public_bind_host= {{ identity_host }} - -admin_bind_host= {{ identity_host }} - -#compute_port=8774 - -#admin_port=35357 - -#public_port=5000 - -# The base public endpoint URL for keystone that are -# advertised to clients (NOTE: this does NOT affect how -# keystone listens for connections) (string value). -# Defaults to the base host URL of the request. Eg a -# request to http://server:5000/v2.0/users will -# default to http://server:5000. You should only need -# to set this value if the base URL contains a path -# (eg /prefix/v2.0) or the endpoint should be found on -# a different server. -#public_endpoint=http://localhost:%(public_port)s/ - -# The base admin endpoint URL for keystone that are advertised -# to clients (NOTE: this does NOT affect how keystone listens -# for connections) (string value). -# Defaults to the base host URL of the request. Eg a -# request to http://server:35357/v2.0/users will -# default to http://server:35357. You should only need -# to set this value if the base URL contains a path -# (eg /prefix/v2.0) or the endpoint should be found on -# a different server. -#admin_endpoint=http://localhost:%(admin_port)s/ - -# onready allows you to send a notification when the process -# is ready to serve For example, to have it notify using -# systemd, one could set shell command: "onready = systemd- -# notify --ready" or a module with notify() method: "onready = -# keystone.common.systemd". (string value) -#onready= - -# enforced by optional sizelimit middleware -# (keystone.middleware:RequestBodySizeLimiter). (integer -# value) -#max_request_body_size=114688 - -# limit the sizes of user & tenant ID/names. (integer value) -#max_param_size=64 - -# similar to max_param_size, but provides an exception for -# token values. (integer value) -#max_token_size=8192 - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the -# user_tenant_membership table with explicit role grants. -# After migration, the member_role_id will be used in the API -# add_user_to_project. (string value) -#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab - -# During a SQL upgrade member_role_id will be used to create a -# new role that will replace records in the -# user_tenant_membership table with explicit role grants. -# After migration, member_role_name will be ignored. (string -# value) -#member_role_name=_member_ - -# The value passed as the keyword "rounds" to passlib encrypt -# method. (integer value) -#crypt_strength=40000 - -# Set this to True if you want to enable TCP_KEEPALIVE on -# server sockets i.e. sockets used by the keystone wsgi server -# for client connections. (boolean value) -#tcp_keepalive=false - -# Sets the value of TCP_KEEPIDLE in seconds for each server -# socket. Only applies if tcp_keepalive is True. Not supported -# on OS X. 
(integer value) -#tcp_keepidle=600 - -# The maximum number of entities that will be returned in a -# collection can be set with list_limit, with no limit set by -# default. This global limit may be then overridden for a -# specific driver, by specifying a list_limit in the -# appropriate section (e.g. [assignment]). (integer value) -#list_limit= - -# Set this to false if you want to enable the ability for -# user, group and project entities to be moved between domains -# by updating their domain_id. Allowing such movement is not -# recommended if the scope of a domain admin is being -# restricted by use of an appropriate policy file (see -# policy.v3cloudsample as an example). (boolean value) -#domain_id_immutable=true - - -# -# Options defined in oslo.messaging -# - -# Use durable queues in amqp. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues=false - -# Auto-delete queues in amqp. (boolean value) -#amqp_auto_delete=false - -# Size of RPC connection pool. (integer value) -#rpc_conn_pool_size=30 - -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. (list value) -#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions -# Qpid broker hostname. (string value) -#qpid_hostname=localhost - -# Qpid broker port. (integer value) -#qpid_port=5672 - -# Qpid HA cluster host:port pairs. (list value) -#qpid_hosts=$qpid_hostname:$qpid_port - -# Username for Qpid connection. (string value) -#qpid_username= - -# Password for Qpid connection. (string value) -#qpid_password= - -# Space separated list of SASL mechanisms to use for auth. -# (string value) -#qpid_sasl_mechanisms= - -# Seconds between connection keepalive heartbeats. (integer -# value) -#qpid_heartbeat=60 - -# Transport to use, either 'tcp' or 'ssl'. (string value) -#qpid_protocol=tcp - -# Whether to disable the Nagle algorithm. (boolean value) -#qpid_tcp_nodelay=true - -# The qpid topology version to use. Version 1 is what was -# originally used by impl_qpid. Version 2 includes some -# backwards-incompatible changes that allow broker federation -# to work. Users should update to version 2 when they are -# able to take everything down, as it requires a clean break. -# (integer value) -#qpid_topology_version=1 - -# SSL version to use (valid only if SSL enabled). valid values -# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some -# distributions. (string value) -#kombu_ssl_version= - -# SSL key file (valid only if SSL enabled). (string value) -#kombu_ssl_keyfile= - -# SSL cert file (valid only if SSL enabled). (string value) -#kombu_ssl_certfile= - -# SSL certification authority file (valid only if SSL -# enabled). (string value) -#kombu_ssl_ca_certs= - -# How long to wait before reconnecting in response to an AMQP -# consumer cancel notification. (floating point value) -#kombu_reconnect_delay=1.0 - -# The RabbitMQ broker address where a single node is used. -# (string value) -#rabbit_host=localhost - -# The RabbitMQ broker port where a single node is used. -# (integer value) -#rabbit_port=5672 - -# RabbitMQ HA cluster host:port pairs. (list value) -#rabbit_hosts=$rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -#rabbit_use_ssl=false - -# The RabbitMQ userid. (string value) -rabbit_userid={{ RABBIT_USER }} - -# The RabbitMQ password. 
(string value) -rabbit_password={{ RABBIT_PASS }} - -# the RabbitMQ login method (string value) -#rabbit_login_method=AMQPLAIN - -# The RabbitMQ virtual host. (string value) -#rabbit_virtual_host=/ - -# How frequently to retry connecting with RabbitMQ. (integer -# value) -#rabbit_retry_interval=1 - -# How long to backoff for between retries when connecting to -# RabbitMQ. (integer value) -#rabbit_retry_backoff=2 - -# Maximum number of RabbitMQ connection retries. Default is 0 -# (infinite retry count). (integer value) -#rabbit_max_retries=0 - -# Use HA queues in RabbitMQ (x-ha-policy: all). If you change -# this option, you must wipe the RabbitMQ database. (boolean -# value) -#rabbit_ha_queues=false - -# If passed, use a fake RabbitMQ provider. (boolean value) -#fake_rabbit=false - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet -# interface, or IP. The "host" option should point or resolve -# to this address. (string value) -#rpc_zmq_bind_address=* - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost - -# ZeroMQ receiver listening port. (integer value) -#rpc_zmq_port=9501 - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts=1 - -# Maximum number of ingress messages to locally buffer per -# topic. Default is unlimited. (integer value) -#rpc_zmq_topic_backlog= - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir=/var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP -# address. Must match "host" option, if running Nova. (string -# value) -#rpc_zmq_host=keystone - -# Seconds to wait before a cast expires (TTL). Only supported -# by impl_zmq. (integer value) -#rpc_cast_timeout=30 - -# Heartbeat frequency. (integer value) -#matchmaker_heartbeat_freq=300 - -# Heartbeat time-to-live. (integer value) -#matchmaker_heartbeat_ttl=600 - -# Host to locate redis. (string value) -#host=127.0.0.1 - -# Use this port to connect to redis host. (integer value) -#port=6379 - -# Password for Redis server (optional). (string value) -#password= - -# Size of RPC greenthread pool. (integer value) -#rpc_thread_pool_size=64 - -# Driver or drivers to handle sending notifications. (multi -# valued) -#notification_driver= - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -#notification_topics=notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout=60 - -# A URL representing the messaging driver to use and its full -# configuration. If not set, we fall back to the rpc_backend -# option and driver specific configuration. (string value) -#transport_url= - -# The messaging driver to use, defaults to rabbit. Other -# drivers include qpid and zmq. (string value) -#rpc_backend=rabbit - -# The default exchange under which topics are scoped. May be -# overridden by an exchange name specified in the -# transport_url option. (string value) -#control_exchange=openstack - - -# -# Options defined in keystone.notifications -# - -# Default publisher_id for outgoing notifications (string -# value) -#default_publisher_id= - - -# -# Options defined in keystone.middleware.ec2_token -# - -# URL to get token from ec2 request. (string value) -#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens - -# Required if EC2 server requires client certificate. (string -# value) -#keystone_ec2_keyfile= - -# Client certificate key filename. Required if EC2 server -# requires client certificate. 
(string value) -#keystone_ec2_certfile= - -# A PEM encoded certificate authority to use when verifying -# HTTPS connections. Defaults to the system CAs. (string -# value) -#keystone_ec2_cafile= - -# Disable SSL certificate verification. (boolean value) -#keystone_ec2_insecure=false - - -# -# Options defined in keystone.openstack.common.eventlet_backdoor -# - -# Enable eventlet backdoor. Acceptable values are 0, <port>, -# and <start>:<end>, where 0 results in listening on a random -# tcp port number; <port> results in listening on the -# specified port number (and not enabling backdoor if that -# port is in use); and <start>:<end> results in listening on -# the smallest unused port number within the specified range -# of port numbers. The chosen port is displayed in the -# service's log file. (string value) -#backdoor_port= - - -# -# Options defined in keystone.openstack.common.lockutils -# - -# Whether to disable inter-process locks (boolean value) -#disable_process_locking=false - -# Directory to use for lock files. (string value) -#lock_path= - - -# -# Options defined in keystone.openstack.common.log -# - -# Print debugging output (set logging level to DEBUG instead -# of default WARNING level). (boolean value) -debug={{ DEBUG }} - -# Print more verbose output (set logging level to INFO instead -# of default WARNING level). (boolean value) -verbose={{ VERBOSE }} - -# Log output to standard error (boolean value) -#use_stderr=true - -# Format string to use for log messages with context (string -# value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context -# (string value) -#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG (string -# value) -#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format -# (string value) -#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s - -# List of logger=LEVEL pairs (list value) -#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN - -# Publish error events (boolean value) -#publish_errors=false - -# Make deprecations fatal (boolean value) -#fatal_deprecations=false - -# If an instance is passed with the log message, format it -# like this (string value) -#instance_format="[instance: %(uuid)s] " - -# If an instance UUID is passed with the log message, format -# it like this (string value) -#instance_uuid_format="[instance: %(uuid)s] " - -# The name of logging configuration file. It does not disable -# existing loggers, but just appends specified logging -# configuration to any other existing logging options. Please -# see the Python logging module documentation for details on -# logging configuration files. (string value) -# Deprecated group/name - [DEFAULT]/log_config -#log_config_append= - -# DEPRECATED. A logging.Formatter log message format string -# which may use any of the available logging.LogRecord -# attributes. This option is deprecated. Please use -# logging_context_format_string and -# logging_default_format_string instead. (string value) -#log_format= - -# Format string for %%(asctime)s in log records.
Default: -# %(default)s (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If no default is -# set, logging will go to stdout. (string value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file= - -# (Optional) The base directory used for relative --log-file -# paths (string value) -# Deprecated group/name - [DEFAULT]/logdir -log_dir = /var/log/keystone - -# Use syslog for logging. Existing syslog format is DEPRECATED -# during I, and then will be changed in J to honor RFC5424 -# (boolean value) -#use_syslog=false - -# (Optional) Use syslog rfc5424 format for logging. If -# enabled, will add APP-NAME (RFC5424) before the MSG part of -# the syslog message. The old format without APP-NAME is -# deprecated in I, and will be removed in J. (boolean value) -#use_syslog_rfc_format=false - -# Syslog facility to receive log lines (string value) -#syslog_log_facility=LOG_USER - - -# -# Options defined in keystone.openstack.common.policy -# - -# JSON file containing policy (string value) -#policy_file=policy.json - -# Rule enforced when requested rule is not found (string -# value) -#policy_default_rule=default - - -[assignment] - -# -# Options defined in keystone -# - -# Keystone Assignment backend driver. (string value) -#driver= - -# Toggle for assignment caching. This has no effect unless -# global caching is enabled. (boolean value) -#caching=true - -# TTL (in seconds) to cache assignment data. This has no -# effect unless global caching is enabled. (integer value) -#cache_time= - -# Maximum number of entities that will be returned in an -# assignment collection. (integer value) -#list_limit= - - -[auth] - -# -# Options defined in keystone -# - -# Default auth methods. (list value) -#methods=external,password,token - -# The password auth plugin module. (string value) -#password=keystone.auth.plugins.password.Password - -# The token auth plugin module. (string value) -#token=keystone.auth.plugins.token.Token - -# The external (REMOTE_USER) auth plugin module. (string -# value) -#external=keystone.auth.plugins.external.DefaultDomain - - -[cache] - -# -# Options defined in keystone -# - -# Prefix for building the configuration dictionary for the -# cache region. This should not need to be changed unless -# there is another dogpile.cache region with the same -# configuration name. (string value) -#config_prefix=cache.keystone - -# Default TTL, in seconds, for any cached item in the -# dogpile.cache region. This applies to any cached method that -# doesn't have an explicit cache expiration time defined for -# it. (integer value) -#expiration_time=600 - -# Dogpile.cache backend module. It is recommended that -# Memcache (dogpile.cache.memcache) or Redis -# (dogpile.cache.redis) be used in production deployments. -# Small workloads (single process) like devstack can use the -# dogpile.cache.memory backend. (string value) -#backend=keystone.common.cache.noop - -# Use a key-mangling function (sha1) to ensure fixed length -# cache-keys. This is toggle-able for debugging purposes, it -# is highly recommended to always leave this set to True. -# (boolean value) -#use_key_mangler=true - -# Arguments supplied to the backend module. Specify this -# option once per argument to be passed to the dogpile.cache -# backend. Example format: "<argument name>:<value>". (multi valued) -#backend_argument= - -# Proxy Classes to import that will affect the way the -# dogpile.cache backend functions. See the dogpile.cache -# documentation on changing-backend-behavior.
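The use_key_mangler toggle above refers to dogpile.cache's sha1 key mangling, which maps arbitrary-length logical keys onto fixed-length, memcached-safe cache keys. The idea in miniature (a sketch, not keystone's implementation):

    import hashlib

    def mangle_key(key):
        # Hash the logical key so every cache key has a fixed length
        # and contains no characters the backend might reject.
        return hashlib.sha1(key.encode("utf-8")).hexdigest()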
Comma delimited -# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2. -# (list value) -#proxies= - -# Global toggle for all caching using the should_cache_fn -# mechanism. (boolean value) -#enabled=false - -# Extra debugging from the cache backend (cache keys, -# get/set/delete/etc calls) This is only really useful if you -# need to see the specific cache-backend get/set/delete calls -# with the keys/values. Typically this should be left set to -# False. (boolean value) -#debug_cache_backend=false - - -[catalog] - -# -# Options defined in keystone -# - -# Catalog template file name for use with the template catalog -# backend. (string value) -#template_file=default_catalog.templates - -# Keystone catalog backend driver. (string value) -#driver=keystone.catalog.backends.sql.Catalog - -# Maximum number of entities that will be returned in a -# catalog collection. (integer value) -#list_limit= - - -[credential] - -# -# Options defined in keystone -# - -# Keystone Credential backend driver. (string value) -#driver=keystone.credential.backends.sql.Credential - - -[database] - -# -# Options defined in keystone.openstack.common.db.options -# - -# The file name to use with SQLite (string value) -#sqlite_db=keystone.sqlite - -# If True, SQLite uses synchronous mode (boolean value) -#sqlite_synchronous=true - -# The backend to use for db (string value) -# Deprecated group/name - [DEFAULT]/db_backend -#backend=sqlalchemy - -# The SQLAlchemy connection string used to connect to the -# database (string value) -# Deprecated group/name - [DEFAULT]/sql_connection -# Deprecated group/name - [DATABASE]/sql_connection -# Deprecated group/name - [sql]/connection -#connection= -connection = mysql://keystone:{{ KEYSTONE_DBPASS }}@{{ db_host }}/keystone - -# The SQL mode to be used for MySQL sessions. This option, -# including the default, overrides any server-set SQL mode. To -# use whatever SQL mode is set by the server configuration, -# set this to no value. Example: mysql_sql_mode= (string -# value) -#mysql_sql_mode=TRADITIONAL - -# Timeout before idle sql connections are reaped (integer -# value) -# Deprecated group/name - [DEFAULT]/sql_idle_timeout -# Deprecated group/name - [DATABASE]/sql_idle_timeout -# Deprecated group/name - [sql]/idle_timeout -#idle_timeout=3600 - -# Minimum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_min_pool_size -# Deprecated group/name - [DATABASE]/sql_min_pool_size -#min_pool_size=1 - -# Maximum number of SQL connections to keep open in a pool -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_pool_size -# Deprecated group/name - [DATABASE]/sql_max_pool_size -#max_pool_size= - -# Maximum db connection retries during startup. (setting -1 -# implies an infinite retry count) (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_retries -# Deprecated group/name - [DATABASE]/sql_max_retries -#max_retries=10 - -# Interval between retries of opening a sql connection -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_retry_interval -# Deprecated group/name - [DATABASE]/reconnect_interval -#retry_interval=10 - -# If set, use this value for max_overflow with sqlalchemy -# (integer value) -# Deprecated group/name - [DEFAULT]/sql_max_overflow -# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow -#max_overflow= - -# Verbosity of SQL debugging information. 
0=None, -# 100=Everything (integer value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug=0 - -# Add python stack traces to SQL as comment strings (boolean -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace=false - -# If set, use this value for pool_timeout with sqlalchemy -# (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout= - -# Enable the experimental use of database reconnect on -# connection lost (boolean value) -#use_db_reconnect=false - -# seconds between db connection retries (integer value) -#db_retry_interval=1 - -# Whether to increase interval between db connection retries, -# up to db_max_retry_interval (boolean value) -#db_inc_retry_interval=true - -# max seconds between db connection retries, if -# db_inc_retry_interval is enabled (integer value) -#db_max_retry_interval=10 - -# maximum db connection retries before error is raised. -# (setting -1 implies an infinite retry count) (integer value) -#db_max_retries=20 - - -[ec2] - -# -# Options defined in keystone -# - -# Keystone EC2Credential backend driver. (string value) -#driver=keystone.contrib.ec2.backends.kvs.Ec2 - - -[endpoint_filter] - -# -# Options defined in keystone -# - -# Keystone Endpoint Filter backend driver (string value) -#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter - -# Toggle to return all active endpoints if no filter exists. -# (boolean value) -#return_all_endpoints_if_no_filter=true - - -[federation] - -# -# Options defined in keystone -# - -# Keystone Federation backend driver. (string value) -#driver=keystone.contrib.federation.backends.sql.Federation - -# Value to be used when filtering assertion parameters from -# the environment. (string value) -#assertion_prefix= - - -[identity] - -# -# Options defined in keystone -# - -# This references the domain to use for all Identity API v2 -# requests (which are not aware of domains). A domain with -# this ID will be created for you by keystone-manage db_sync -# in migration 008. The domain referenced by this ID cannot -# be deleted on the v3 API, to prevent accidentally breaking -# the v2 API. There is nothing special about this domain, -# other than the fact that it must exist in order to maintain -# support for your v2 clients. (string value) -#default_domain_id=default - -# A subset (or all) of domains can have their own identity -# driver, each with their own partial configuration file in a -# domain configuration directory. Only values specific to the -# domain need to be placed in the domain specific -# configuration file. This feature is disabled by default; set -# to True to enable. (boolean value) -#domain_specific_drivers_enabled=false - -# Path for Keystone to locate the domain specific identity -# configuration files if domain_specific_drivers_enabled is -# set to true. (string value) -#domain_config_dir=/etc/keystone/domains - -# Keystone Identity backend driver. (string value) -#driver=keystone.identity.backends.sql.Identity - -# Maximum supported length for user passwords; decrease to -# improve performance. (integer value) -#max_password_length=4096 - -# Maximum number of entities that will be returned in an -# identity collection. (integer value) -#list_limit= - - -[kvs] - -# -# Options defined in keystone -# - -# Extra dogpile.cache backend modules to register with the -# dogpile.cache library. (list value) -#backends= - -# Prefix for building the configuration dictionary for the KVS -# region.
This should not need to be changed unless there is -# another dogpile.cache region with the same configuration -# name. (string value) -#config_prefix=keystone.kvs - -# Toggle to disable using a key-mangling function to ensure -# fixed length keys. This is toggle-able for debugging -# purposes, it is highly recommended to always leave this set -# to True. (boolean value) -#enable_key_mangler=true - -# Default lock timeout for distributed locking. (integer -# value) -#default_lock_timeout=5 - - -[ldap] - -# -# Options defined in keystone -# - -# URL for connecting to the LDAP server. (string value) -#url=ldap://localhost - -# User BindDN to query the LDAP server. (string value) -#user= - -# Password for the BindDN to query the LDAP server. (string -# value) -#password= - -# LDAP server suffix (string value) -#suffix=cn=example,cn=com - -# If true, will add a dummy member to groups. This is required -# if the objectclass for groups requires the "member" -# attribute. (boolean value) -#use_dumb_member=false - -# DN of the "dummy member" to use when "use_dumb_member" is -# enabled. (string value) -#dumb_member=cn=dumb,dc=nonexistent - -# allow deleting subtrees. (boolean value) -#allow_subtree_delete=false - -# The LDAP scope for queries, this can be either "one" -# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree). -# (string value) -#query_scope=one - -# Maximum results per page; a value of zero ("0") disables -# paging. (integer value) -#page_size=0 - -# The LDAP dereferencing option for queries. This can be -# either "never", "searching", "always", "finding" or -# "default". The "default" option falls back to using default -# dereferencing configured by your ldap.conf. (string value) -#alias_dereferencing=default - -# Override the system's default referral chasing behavior for -# queries. (boolean value) -#chase_referrals= - -# Search base for users. (string value) -#user_tree_dn= - -# LDAP search filter for users. (string value) -#user_filter= - -# LDAP objectClass for users. (string value) -#user_objectclass=inetOrgPerson - -# LDAP attribute mapped to user id. (string value) -#user_id_attribute=cn - -# LDAP attribute mapped to user name. (string value) -#user_name_attribute=sn - -# LDAP attribute mapped to user email. (string value) -#user_mail_attribute=email - -# LDAP attribute mapped to password. (string value) -#user_pass_attribute=userPassword - -# LDAP attribute mapped to user enabled flag. (string value) -#user_enabled_attribute=enabled - -# Bitmask integer to indicate the bit that the enabled value -# is stored in if the LDAP server represents "enabled" as a -# bit on an integer rather than a boolean. A value of "0" -# indicates the mask is not used. If this is not set to "0" -# the typical value is "2". This is typically used when -# "user_enabled_attribute = userAccountControl". (integer -# value) -#user_enabled_mask=0 - -# Default value to enable users. This should match an -# appropriate int value if the LDAP server uses non-boolean -# (bitmask) values to indicate if a user is enabled or -# disabled. If this is not set to "True" the typical value is -# "512". This is typically used when "user_enabled_attribute = -# userAccountControl". (string value) -#user_enabled_default=True - -# List of attributes stripped off the user on update. (list -# value) -#user_attribute_ignore=default_project_id,tenants - -# LDAP attribute mapped to default_project_id for users. -# (string value) -#user_default_project_id_attribute= - -# Allow user creation in LDAP backend.
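Options such as user_enabled_mask above cover LDAP servers (e.g. Active Directory's userAccountControl) that encode "enabled" as one bit of an integer rather than as a boolean; with the typical mask of 2, an account is enabled exactly when that bit is clear. A sketch of the decoding rule (illustrative; the function name is not keystone's):

    def ldap_user_enabled(user_account_control, mask=2):
        # user_enabled_mask = 2 marks bit 1 as the disable flag, in the
        # userAccountControl style: the account is enabled when the
        # masked bit is not set.
        return (user_account_control & mask) == 0

    # e.g. ldap_user_enabled(512) -> True, ldap_user_enabled(514) -> False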
(boolean value) -#user_allow_create=true - -# Allow user updates in LDAP backend. (boolean value) -#user_allow_update=true - -# Allow user deletion in LDAP backend. (boolean value) -#user_allow_delete=true - -# If True, Keystone uses an alternative method to determine if -# a user is enabled or not by checking if they are a member of -# the "user_enabled_emulation_dn" group. (boolean value) -#user_enabled_emulation=false - -# DN of the group entry to hold enabled users when using -# enabled emulation. (string value) -#user_enabled_emulation_dn= - -# List of additional LDAP attributes used for mapping -# Additional attribute mappings for users. Attribute mapping -# format is <ldap_attr>:<user_attr>, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#user_additional_attribute_mapping= - -# Search base for projects (string value) -#tenant_tree_dn= - -# LDAP search filter for projects. (string value) -#tenant_filter= - -# LDAP objectClass for projects. (string value) -#tenant_objectclass=groupOfNames - -# LDAP attribute mapped to project id. (string value) -#tenant_id_attribute=cn - -# LDAP attribute mapped to project membership for user. -# (string value) -#tenant_member_attribute=member - -# LDAP attribute mapped to project name. (string value) -#tenant_name_attribute=ou - -# LDAP attribute mapped to project description. (string value) -#tenant_desc_attribute=description - -# LDAP attribute mapped to project enabled. (string value) -#tenant_enabled_attribute=enabled - -# LDAP attribute mapped to project domain_id. (string value) -#tenant_domain_id_attribute=businessCategory - -# List of attributes stripped off the project on update. (list -# value) -#tenant_attribute_ignore= - -# Allow tenant creation in LDAP backend. (boolean value) -#tenant_allow_create=true - -# Allow tenant update in LDAP backend. (boolean value) -#tenant_allow_update=true - -# Allow tenant deletion in LDAP backend. (boolean value) -#tenant_allow_delete=true - -# If True, Keystone uses an alternative method to determine if -# a project is enabled or not by checking if it is a member -# of the "tenant_enabled_emulation_dn" group. (boolean value) -#tenant_enabled_emulation=false - -# DN of the group entry to hold enabled projects when using -# enabled emulation. (string value) -#tenant_enabled_emulation_dn= - -# Additional attribute mappings for projects. Attribute -# mapping format is <ldap_attr>:<user_attr>, where ldap_attr -# is the attribute in the LDAP entry and user_attr is the -# Identity API attribute. (list value) -#tenant_additional_attribute_mapping= - -# Search base for roles. (string value) -#role_tree_dn= - -# LDAP search filter for roles. (string value) -#role_filter= - -# LDAP objectClass for roles. (string value) -#role_objectclass=organizationalRole - -# LDAP attribute mapped to role id. (string value) -#role_id_attribute=cn - -# LDAP attribute mapped to role name. (string value) -#role_name_attribute=ou - -# LDAP attribute mapped to role membership. (string value) -#role_member_attribute=roleOccupant - -# List of attributes stripped off the role on update. (list -# value) -#role_attribute_ignore= - -# Allow role creation in LDAP backend. (boolean value) -#role_allow_create=true - -# Allow role update in LDAP backend. (boolean value) -#role_allow_update=true - -# Allow role deletion in LDAP backend. (boolean value) -#role_allow_delete=true - -# Additional attribute mappings for roles.
Attribute mapping -# format is <ldap_attr>:<user_attr>, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#role_additional_attribute_mapping= - -# Search base for groups. (string value) -#group_tree_dn= - -# LDAP search filter for groups. (string value) -#group_filter= - -# LDAP objectClass for groups. (string value) -#group_objectclass=groupOfNames - -# LDAP attribute mapped to group id. (string value) -#group_id_attribute=cn - -# LDAP attribute mapped to group name. (string value) -#group_name_attribute=ou - -# LDAP attribute mapped to show group membership. (string -# value) -#group_member_attribute=member - -# LDAP attribute mapped to group description. (string value) -#group_desc_attribute=description - -# List of attributes stripped off the group on update. (list -# value) -#group_attribute_ignore= - -# Allow group creation in LDAP backend. (boolean value) -#group_allow_create=true - -# Allow group update in LDAP backend. (boolean value) -#group_allow_update=true - -# Allow group deletion in LDAP backend. (boolean value) -#group_allow_delete=true - -# Additional attribute mappings for groups. Attribute mapping -# format is <ldap_attr>:<user_attr>, where ldap_attr is the -# attribute in the LDAP entry and user_attr is the Identity -# API attribute. (list value) -#group_additional_attribute_mapping= - -# CA certificate file path for communicating with LDAP -# servers. (string value) -#tls_cacertfile= - -# CA certificate directory path for communicating with LDAP -# servers. (string value) -#tls_cacertdir= - -# Enable TLS for communicating with LDAP servers. (boolean -# value) -#use_tls=false - -# valid options for tls_req_cert are demand, never, and allow. -# (string value) -#tls_req_cert=demand - - -[matchmaker_ring] - -# -# Options defined in oslo.messaging -# - -# Matchmaker ring file (JSON). (string value) -# Deprecated group/name - [DEFAULT]/matchmaker_ringfile -#ringfile=/etc/oslo/matchmaker_ring.json - - -[memcache] - -# -# Options defined in keystone -# - -# Memcache servers in the format of "host:port" (list value) -#servers=localhost:11211 - -# Number of compare-and-set attempts to make when using -# compare-and-set in the token memcache back end. (integer -# value) -#max_compare_and_set_retry=16 - - -[oauth1] - -# -# Options defined in keystone -# - -# Keystone Credential backend driver. (string value) -#driver=keystone.contrib.oauth1.backends.sql.OAuth1 - -# Duration (in seconds) for the OAuth Request Token. (integer -# value) -#request_token_duration=28800 - -# Duration (in seconds) for the OAuth Access Token. (integer -# value) -#access_token_duration=86400 - - -[os_inherit] - -# -# Options defined in keystone -# - -# role-assignment inheritance to projects from owning domain -# can be optionally enabled. (boolean value) -#enabled=false - - -[paste_deploy] - -# -# Options defined in keystone -# - -# Name of the paste configuration file that defines the -# available pipelines. (string value) -#config_file=keystone-paste.ini - - -[policy] - -# -# Options defined in keystone -# - -# Keystone Policy backend driver. (string value) -#driver=keystone.policy.backends.sql.Policy - -# Maximum number of entities that will be returned in a policy -# collection. (integer value) -#list_limit= - - -[revoke] - -# -# Options defined in keystone -# - -# An implementation of the backend for persisting revocation -# events.
(string value) -#driver=keystone.contrib.revoke.backends.kvs.Revoke - -# This value (calculated in seconds) is added to token -# expiration before a revocation event may be removed from the -# backend. (integer value) -#expiration_buffer=1800 - -# Toggle for revocation event caching. This has no effect -# unless global caching is enabled. (boolean value) -#caching=true - - -[signing] - -# -# Options defined in keystone -# - -# Deprecated in favor of provider in the [token] section. -# (string value) -#token_format= - -# Path of the certfile for token signing. (string value) -#certfile=/etc/keystone/ssl/certs/signing_cert.pem - -# Path of the keyfile for token signing. (string value) -#keyfile=/etc/keystone/ssl/private/signing_key.pem - -# Path of the CA for token signing. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA Key for token signing. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Key Size (in bits) for token signing cert (auto generated -# certificate). (integer value) -#key_size=2048 - -# Days the token signing cert is valid for (auto generated -# certificate). (integer value) -#valid_days=3650 - -# Certificate Subject (auto generated certificate) for token -# signing. (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com - - -[ssl] - -# -# Options defined in keystone -# - -# Toggle for SSL support on the keystone eventlet servers. -# (boolean value) -#enable=false - -# Path of the certfile for SSL. (string value) -#certfile=/etc/keystone/ssl/certs/keystone.pem - -# Path of the keyfile for SSL. (string value) -#keyfile=/etc/keystone/ssl/private/keystonekey.pem - -# Path of the ca cert file for SSL. (string value) -#ca_certs=/etc/keystone/ssl/certs/ca.pem - -# Path of the CA key file for SSL. (string value) -#ca_key=/etc/keystone/ssl/private/cakey.pem - -# Require client certificate. (boolean value) -#cert_required=false - -# SSL Key Length (in bits) (auto generated certificate). -# (integer value) -#key_size=1024 - -# Days the certificate is valid for once signed (auto -# generated certificate). (integer value) -#valid_days=3650 - -# SSL Certificate Subject (auto generated certificate). -# (string value) -#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost - - -[stats] - -# -# Options defined in keystone -# - -# Keystone stats backend driver. (string value) -#driver=keystone.contrib.stats.backends.kvs.Stats - - -[token] - -# -# Options defined in keystone -# - -# External auth mechanisms that should add bind information to -# token e.g. kerberos, x509. (list value) -#bind= - -# Enforcement policy on tokens presented to keystone with bind -# information. One of disabled, permissive, strict, required -# or a specifically required bind mode e.g. kerberos or x509 -# to require binding to that authentication. (string value) -#enforce_token_bind=permissive - -# Amount of time a token should remain valid (in seconds). -# (integer value) -#expiration=3600 - -# Controls the token construction, validation, and revocation -# operations. Core providers are -# "keystone.token.providers.[pki|uuid].Provider". (string -# value) -provider=keystone.token.providers.uuid.Provider - -# Keystone Token persistence backend driver. (string value) -driver=keystone.token.persistence.backends.sql.Token - -# Toggle for token system caching. This has no effect unless -# global caching is enabled.
(boolean value) -#caching=true - -# Time to cache the revocation list and the revocation events -# if revoke extension is enabled (in seconds). This has no -# effect unless global and token caching are enabled. (integer -# value) -revocation_cache_time=3600 - -# Time to cache tokens (in seconds). This has no effect unless -# global and token caching are enabled. (integer value) -#cache_time= - -# Revoke token by token identifier. Setting revoke_by_id to -# True enables various forms of enumerating tokens, e.g. `list -# tokens for user`. These enumerations are processed to -# determine the list of tokens to revoke. Only disable if -# you are switching to using the Revoke extension with a -# backend other than KVS, which stores events in memory. -# (boolean value) -#revoke_by_id=true - - -[trust] - -# -# Options defined in keystone -# - -# delegation and impersonation features can be optionally -# disabled. (boolean value) -#enabled=true - -# Keystone Trust backend driver. (string value) -#driver=keystone.trust.backends.sql.Trust - - -[extra_headers] -Distribution = Ubuntu - diff --git a/compass/deploy/ansible/roles/keystone/templates/keystone_init b/compass/deploy/ansible/roles/keystone/templates/keystone_init deleted file mode 100644 index 729669b..0000000 --- a/compass/deploy/ansible/roles/keystone/templates/keystone_init +++ /dev/null @@ -1,42 +0,0 @@ -# create an administrative user - -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 role-create --name=admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin - -# create a normal user - -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=demo@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo - -# create a service tenant -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-create --name=service --description="Service Tenant" - -# register keystone -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ HA_VIP }}:5000/v2.0 --internalurl=http://{{ HA_VIP }}:5000/v2.0 --adminurl=http://{{ HA_VIP }}:35357/v2.0 - -# Create a glance user that the Image Service can use to authenticate with the Identity service -keystone
--os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin - -#Register the Image Service with the Identity service so that other OpenStack services can locate it -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ HA_VIP }}:9292 --internalurl=http://{{ HA_VIP }}:9292 --adminurl=http://{{ HA_VIP }}:9292 - -#Create a nova user that Compute uses to authenticate with the Identity Service -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin - -# register Compute with the Identity Service so that other OpenStack services can locate it -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ HA_VIP }}:8774/v2/%\(tenant_id\)s - -# register neutron user, role and service -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking" -keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ HA_VIP }}:9696 --adminurl http://{{ HA_VIP }}:9696 --internalurl http://{{ HA_VIP }}:9696 diff --git a/compass/deploy/ansible/roles/monitor/files/check_service.sh b/compass/deploy/ansible/roles/monitor/files/check_service.sh deleted file mode 100644 index d309673..0000000 --- a/compass/deploy/ansible/roles/monitor/files/check_service.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -services=`cat /opt/service | uniq` -for service in $services; do - if [ `/sbin/initctl list|awk '/stop\/waiting/{print $1}'|uniq | grep $service` ]; then - /sbin/start $service - fi -done diff --git a/compass/deploy/ansible/roles/monitor/files/root b/compass/deploy/ansible/roles/monitor/files/root deleted file mode 100644 index 9c55c4f..0000000 --- a/compass/deploy/ansible/roles/monitor/files/root +++ /dev/null @@ -1 +0,0
@@ -* * * * * /usr/local/bin/check_service.sh >> /var/log/check_service.log 2>&1 diff --git a/compass/deploy/ansible/roles/monitor/tasks/main.yml b/compass/deploy/ansible/roles/monitor/tasks/main.yml deleted file mode 100644 index e5b93f3..0000000 --- a/compass/deploy/ansible/roles/monitor/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: copy service check file - copy: src=check_service.sh dest=/usr/local/bin/check_service.sh mode=0777 - -- name: copy cron file - copy: src=root dest=/var/spool/cron/crontabs/root mode=0600 - -- name: restart cron - service: name=cron state=restarted - - diff --git a/compass/deploy/ansible/roles/mq/tasks/main.yml b/compass/deploy/ansible/roles/mq/tasks/main.yml deleted file mode 100644 index 4ae4065..0000000 --- a/compass/deploy/ansible/roles/mq/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- include: rabbitmq.yml - -#- include: rabbitmq_cluster.yml -# when: HA_CLUSTER is defined diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml deleted file mode 100644 index 5714406..0000000 --- a/compass/deploy/ansible/roles/mq/tasks/rabbitmq.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -- name: create rabbitmq directory - file: path=/etc/rabbitmq state=directory mode=0755 - -- name: copy rabbitmq config file - template: src=rabbitmq-env.conf dest=/etc/rabbitmq/rabbitmq-env.conf mode=0755 - -- name: install rabbitmq-server - apt: name=rabbitmq-server state=present - -- name: stop rabbitmq-server - service: name=rabbitmq-server - state=stopped - -- name: update .erlang.cookie - template: src=.erlang.cookie dest=/var/lib/rabbitmq/.erlang.cookie - group=rabbitmq - owner=rabbitmq - mode=0400 - when: ERLANG_TOKEN is defined - -- name: start and enable rabbitmq-server - service: name=rabbitmq-server - state=started - enabled=yes - -- name: generate mq service list - shell: echo {{ item }} >> /opt/service - with_items: - - rabbitmq-server - -- name: modify rabbitmq password - command: rabbitmqctl change_password guest {{ RABBIT_PASS }} - when: "RABBIT_USER is defined and RABBIT_USER == 'guest'" - ignore_errors: True - -- name: add rabbitmq user - command: rabbitmqctl add_user {{ RABBIT_USER }} {{ RABBIT_PASS }} - when: "RABBIT_USER is defined and RABBIT_USER != 'guest'" - ignore_errors: True - -- name: set rabbitmq user permission - command: rabbitmqctl set_permissions -p / {{ RABBIT_USER }} ".*" ".*" ".*" - when: "RABBIT_USER is defined and RABBIT_USER != 'guest'" - diff --git a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml b/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml deleted file mode 100644 index afd4c77..0000000 --- a/compass/deploy/ansible/roles/mq/tasks/rabbitmq_cluster.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: stop rabbitmq app - command: rabbitmqctl stop_app - when: HA_CLUSTER[inventory_hostname] != '' - -- name: rabbitmqctl reset - command: rabbitmqctl reset - when: HA_CLUSTER[inventory_hostname] != '' - -- name: stop rabbitmq - shell: rabbitmqctl stop - -- name: set detach - shell: rabbitmq-server -detached - -- name: join cluster - command: rabbitmqctl join_cluster rabbit@{{ item }} - when: item != inventory_hostname and HA_CLUSTER[item] == '' - with_items: - groups['controller'] - -- name: start rabbitmq app - command: rabbitmqctl start_app - -- name: set the HA policy - rabbitmq_policy: name=ha-all pattern='^(?!amq\.).*' tags="ha-mode=all" - diff --git a/compass/deploy/ansible/roles/mq/templates/.erlang.cookie 
b/compass/deploy/ansible/roles/mq/templates/.erlang.cookie deleted file mode 100644 index cadcfaf..0000000 --- a/compass/deploy/ansible/roles/mq/templates/.erlang.cookie +++ /dev/null @@ -1 +0,0 @@ -{{ ERLANG_TOKEN }} diff --git a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf b/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf deleted file mode 100644 index 6dd7349..0000000 --- a/compass/deploy/ansible/roles/mq/templates/rabbitmq-env.conf +++ /dev/null @@ -1 +0,0 @@ -RABBITMQ_NODE_IP_ADDRESS={{ HA_VIP }} diff --git a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml b/compass/deploy/ansible/roles/neutron-common/handlers/main.yml deleted file mode 100644 index 36d779d..0000000 --- a/compass/deploy/ansible/roles/neutron-common/handlers/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted enabled=yes - -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted enabled=yes - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml b/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml deleted file mode 100644 index 825178b..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -neutron_ovs_bridge_mappings: "" diff --git a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml b/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml deleted file mode 100644 index 36d779d..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/handlers/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted enabled=yes - -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted enabled=yes - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml b/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml deleted file mode 100644 index 93ee46f..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/tasks/main.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- - -- name: activate ipv4 forwarding - sysctl: name=net.ipv4.ip_forward value=1 - state=present reload=yes - -- name: deactivate ipv4 rp filter - sysctl: name=net.ipv4.conf.all.rp_filter value=0 - state=present reload=yes - -- name: deactivate ipv4 default rp filter - sysctl: name=net.ipv4.conf.default.rp_filter - value=0 state=present reload=yes - -- name: install compute-related neutron packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-common - - neutron-plugin-ml2 - - openvswitch-datapath-dkms - - openvswitch-switch - -- name: generate neutron compute service list - shell: echo {{ item }} >> /opt/service - with_items: - - neutron-plugin-openvswitch-agent - -- name: install neutron openvswitch agent - apt: name=neutron-plugin-openvswitch-agent - state=present force=yes - when:
"'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: config neutron - template: src=neutron-network.conf - dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-plugin-openvswitch-agent - -- name: config ml2 plugin - template: src=ml2_conf.ini - dest=/etc/neutron/plugins/ml2/ml2_conf.ini - backup=yes - notify: - - restart neutron-plugin-openvswitch-agent - -- name: add br-int - openvswitch_bridge: bridge=br-int state=present - notify: - - restart neutron-plugin-openvswitch-agent - - restart nova-compute - -- include: ../../neutron-network/tasks/odl.yml - when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- meta: flush_handlers diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini deleted file mode 100644 index 19eb62e..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/dhcp_agent.ini +++ /dev/null @@ -1,90 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# The DHCP agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -resync_interval = 5 - -# The DHCP agent requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Name of Open vSwitch bridge to use -# ovs_integration_bridge = br-int - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires -# no additional setup of the DHCP server. -dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# The DHCP server can assist with providing metadata support on isolated -# networks. Setting this value to True will cause the DHCP server to append -# specific host routes to the DHCP request. The metadata service will only -# be activated when the subnet does not contain any router port. The guest -# instance must be configured to request host routes via DHCP (Option 121). -enable_isolated_metadata = False - -# Allows for serving metadata requests coming from a dedicated metadata -# access network whose cidr is 169.254.169.254/16 (or larger prefix), and -# is connected to a Neutron router from which the VMs send metadata -# request. In this case DHCP Option 121 will not be injected in VMs, as -# they will be able to reach 169.254.169.254 through a router. -# This option requires enable_isolated_metadata = True -enable_metadata_network = False - -# Number of threads to use during sync process. Should not exceed connection -# pool size configured on server. 
-# num_sync_threads = 4 - -# Location to store DHCP server config files -# dhcp_confs = $state_path/dhcp - -# Domain to use for building the hostnames -dhcp_domain = openstacklocal - -# Override the default dnsmasq settings with this file -# dnsmasq_config_file = -dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf - -# Comma-separated list of DNS servers which will be used by dnsmasq -# as forwarders. -# dnsmasq_dns_servers = - -# Limit number of leases to prevent a denial-of-service. -dnsmasq_lease_max = 16777216 - -# Location to DHCP lease relay UNIX domain socket -# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# dhcp_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the dhcp agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a dhcp server is disabled. -# dhcp_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot deleted file mode 100644 index 32caf96..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/etc/xorp/config.boot +++ /dev/null @@ -1,25 +0,0 @@ -interfaces { - restore-original-config-on-shutdown: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - description: "Internal pNodes interface" - disable: false - default-system-config - } -} - -protocols { - igmp { - disable: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - disable: false - version: 3 - } - } - traceoptions { - flag all { - disable: false - } - } - } -} diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini deleted file mode 100644 index b394c00..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/l3_agent.ini +++ /dev/null @@ -1,81 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# L3 requires that an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) -# that supports L3 agent -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. 
RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# If use_namespaces is set as False then the agent can only configure one router. - -# This is done by setting the specific router_id. -# router_id = - -# When external_network_bridge is set, each L3 agent can be associated -# with no more than one external network. This value should be set to the UUID -# of that external network. To allow L3 agent support multiple external -# networks, both the external_network_bridge and gateway_external_network_id -# must be left empty. -# gateway_external_network_id = - -# Indicates that this L3 agent should also handle routers that do not have -# an external network gateway configured. This option should be True only -# for a single agent in a Neutron deployment, and may be False for all agents -# if all routers must have an external network gateway -handle_internal_only_routers = True - -# Name of bridge used for external network traffic. This should be set to -# empty value for the linux bridge. when this parameter is set, each L3 agent -# can be associated with no more than one external network. -external_network_bridge = br-ex - -# TCP Port used by Neutron metadata server -metadata_port = 9697 - -# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 -# to disable this feature. -send_arp_for_ha = 3 - -# seconds between re-sync routers' data if needed -periodic_interval = 40 - -# seconds to start to sync routers' data after -# starting agent -periodic_fuzzy_delay = 5 - -# enable_metadata_proxy, which is true by default, can be set to False -# if the Nova metadata server is not available -# enable_metadata_proxy = True - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# router_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the L3 agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a router is destroyed. -# router_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini deleted file mode 100644 index 6badf28..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/metadata_agent.ini +++ /dev/null @@ -1,46 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -debug = True - -# The Neutron user information for accessing the Neutron API. 
-auth_url = http://{{ HA_VIP }}:5000/v2.0 -auth_region = RegionOne -# Turn off verification of the certificate for ssl -# auth_insecure = False -# Certificate Authority public key (CA cert) file for ssl -# auth_ca_cert = -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} - -# Network service endpoint type to pull from the keystone catalog -# endpoint_type = adminURL - -# IP address used by Nova metadata server -nova_metadata_ip = {{ HA_VIP }} - -# TCP Port used by Nova metadata server -nova_metadata_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it must match here and in the configuration used by the Nova Metadata -# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret -metadata_proxy_shared_secret = {{ METADATA_SECRET }} - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# Number of separate worker processes for metadata server -# metadata_workers = 0 - -# Number of backlog requests to configure the metadata server socket with -# metadata_backlog = 128 - -# URL to connect to the cache backend. -# Example of URL using memory caching backend -# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5 -# default_ttl=0 parameter will cause cache entries to never expire. -# Otherwise default_ttl specifies time in seconds a cache entry is valid for. -# No cache is used in case no value is passed. -# cache_url = diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini deleted file mode 100644 index a790069..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks.
-# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. -# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf deleted file mode 100644 index 93be9cb..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron-network.conf +++ /dev/null @@ -1,465 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent.
-state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not use_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port to bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4th octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu.
-# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. (boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. 
-notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. 
-# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. -notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update its cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resources allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant.
A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the command directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) -# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=<service_type>:<name>:<driver>[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of <service type> and <name> must be unique; <driver> must also be unique -# This is a multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf deleted file mode 100644 index 1575367..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not use_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port to bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4th octet is not 00, it will also be used. The others will be -# randomly generated.
-# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. 
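The agent-management settings above encode a simple liveness rule: agent_down_time should be at least twice the report_interval the agents use (set later in the [agent] section of this same template). A one-line check of the two values this template actually ships:

    # Values from this template: agent_down_time = 75, report_interval = 30.
    agent_down_time = 75
    report_interval = 30
    assert agent_down_time >= 2 * report_interval, "agents would flap alive/dead"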
It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. -notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. 
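The nova_admin_tenant_id placeholder above is an Ansible registered variable: the neutron_install.yml task later in this patch runs a keystone CLI lookup and registers its output, and the template then reads stdout_lines[0]. A rough Python equivalent of that lookup; the token and endpoint are illustrative stand-ins for {{ ADMIN_TOKEN }} and {{ HA_VIP }}:

    import subprocess

    cmd = ("keystone --os-token=ADMIN_TOKEN "
           "--os-endpoint=http://10.1.0.50:35357/v2.0 "
           "tenant-get service | grep id | awk '{print $4}'")
    result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    stdout_lines = result.stdout.splitlines()   # what Ansible exposes as stdout_lines
    tenant_id = stdout_lines[0] if stdout_lines else None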
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
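The [quotas] section above uses one convention throughout: a negative value means unlimited. A small helper expressing that rule, checked against values from this template:

    def within_quota(current: int, quota: int) -> bool:
        """True if creating one more resource stays within quota."""
        return quota < 0 or current + 1 <= quota

    assert within_quota(99, 100)          # quota_network = 100
    assert within_quota(10**6, -1)        # default_quota = -1 -> unlimited
    assert not within_quota(8000, 8000)   # quota_port = 8000 is a hard cap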
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. 
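The [database] pool options above correspond, roughly, to SQLAlchemy engine parameters. A hedged sketch of that mapping (the keyword names on the left are SQLAlchemy's, the comments name the neutron options; the DSN and credentials are placeholders):

    from sqlalchemy import create_engine

    engine = create_engine(
        "mysql://neutron:password@10.1.0.50/neutron",  # placeholder credentials
        pool_size=100,      # max_pool_size
        max_overflow=100,   # max_overflow
        pool_timeout=10,    # pool_timeout
        pool_recycle=3600,  # idle_timeout (approximate equivalent)
    )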
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf b/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf deleted file mode 100644 index 4988cb0..0000000 --- a/compass/deploy/ansible/roles/neutron-compute/templates/nova.conf +++ /dev/null @@ -1,73 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = {{ internal_ip }} -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[conductor] -manager = nova.conductor.manager.ConductorManager -topic = conductor - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ HA_VIP }} - -[neutron] -url = http://{{ HA_VIP }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml b/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml deleted file mode 100644 index b4c1585..0000000 --- 
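Two oddities in the compute-side nova.conf template above are worth flagging: auth_uri reads http://{{ HA_VIP }}:5000/2.0, which looks like a typo for /v2.0 (every other template in this patch uses /v2.0), and the vif_plugging_* lines use "key: value" colons, which only parse if the INI reader accepts ':' as a delimiter. A quick probe that would surface the first problem at deploy time; the VIP is a placeholder and a keystone v2 endpoint is assumed:

    import requests

    for path in ("/2.0", "/v2.0"):
        r = requests.get("http://10.1.0.50:5000" + path, timeout=5)
        print(path, r.status_code)   # the misspelt path should return 404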
a/compass/deploy/ansible/roles/neutron-controller/handlers/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: restart nova-api - service: name=nova-api state=restarted enabled=yes - -- name: restart nova-cert - service: name=nova-cert state=restarted enabled=yes - -- name: restart nova-consoleauth - service: name=nova-consoleauth state=restarted enabled=yes - -- name: restart nova-scheduler - service: name=nova-scheduler state=restarted enabled=yes - -- name: restart nova-conductor - service: name=nova-conductor state=restarted enabled=yes - -- name: restart nova-novncproxy - service: name=nova-novncproxy state=restarted enabled=yes - -- name: remove nova-sqlite-db - shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed - -- name: restart neutron-server - service: name=neutron-server state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml deleted file mode 100644 index 9c04d74..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- include: neutron_install.yml - tags: - - install - - neutron_install - - neutron - -- include: neutron_config.yml - when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' - tags: - - config - - neutron_config - - neutron diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml deleted file mode 100644 index 77cc29a..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_config.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: neutron-db-manage upgrade to Juno - shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head - register: result - until: result.rc == 0 - retries: 5 - delay: 3 - notify: - - restart neutron-server - diff --git a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml b/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml deleted file mode 100644 index 6165299..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/tasks/neutron_install.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: install controller-related neutron packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-server - - neutron-plugin-ml2 - -- name: generate neutron controll service list - shell: echo {{ item }} >> /opt/service - with_items: - - neutron-server - - neutron-plugin-ml2 - -- name: get tenant id to fill neutron.conf - shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ HA_VIP }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}' - register: NOVA_ADMIN_TENANT_ID - -- name: update neutron conf - template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-server - -- name: update ml2 plugin conf - template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes - notify: - - restart neutron-server - -- meta: flush_handlers - diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini deleted file mode 100644 index 19eb62e..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/dhcp_agent.ini +++ /dev/null @@ -1,90 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = 
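The neutron-db-manage task above retries until rc == 0, with up to 5 retries at 3-second intervals, to ride out a database that is still coming up. Roughly equivalent, as a Python sketch using the same command line as the task:

    import subprocess, time

    cmd = ["neutron-db-manage",
           "--config-file=/etc/neutron/neutron.conf",
           "--config-file=/etc/neutron/plugins/ml2/ml2_conf.ini",
           "upgrade", "head"]
    for attempt in range(5):
        if subprocess.run(cmd).returncode == 0:
            break
        time.sleep(3)
    else:
        raise RuntimeError("neutron-db-manage kept failing after 5 attempts")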
False -verbose = True - -# The DHCP agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -resync_interval = 5 - -# The DHCP agent requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Name of Open vSwitch bridge to use -# ovs_integration_bridge = br-int - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires -# no additional setup of the DHCP server. -dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# The DHCP server can assist with providing metadata support on isolated -# networks. Setting this value to True will cause the DHCP server to append -# specific host routes to the DHCP request. The metadata service will only -# be activated when the subnet does not contain any router port. The guest -# instance must be configured to request host routes via DHCP (Option 121). -enable_isolated_metadata = False - -# Allows for serving metadata requests coming from a dedicated metadata -# access network whose cidr is 169.254.169.254/16 (or larger prefix), and -# is connected to a Neutron router from which the VMs send metadata -# request. In this case DHCP Option 121 will not be injected in VMs, as -# they will be able to reach 169.254.169.254 through a router. -# This option requires enable_isolated_metadata = True -enable_metadata_network = False - -# Number of threads to use during sync process. Should not exceed connection -# pool size configured on server. -# num_sync_threads = 4 - -# Location to store DHCP server config files -# dhcp_confs = $state_path/dhcp - -# Domain to use for building the hostnames -dhcp_domain = openstacklocal - -# Override the default dnsmasq settings with this file -# dnsmasq_config_file = -dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf - -# Comma-separated list of DNS servers which will be used by dnsmasq -# as forwarders. -# dnsmasq_dns_servers = - -# Limit number of leases to prevent a denial-of-service. -dnsmasq_lease_max = 16777216 - -# Location to DHCP lease relay UNIX domain socket -# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# dhcp_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the dhcp agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a dhcp server is disabled. -# dhcp_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. 
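The dnsmasq_lease_max ceiling above is 2**24, i.e. one lease per address of an entire /8, so it is effectively unbounded for any realistic tenant subnet. A quick check with the standard library:

    import ipaddress

    assert 16777216 == 2 ** 24                    # the dnsmasq_lease_max above
    subnet = ipaddress.ip_network("10.0.0.0/24")  # a typical tenant subnet
    print(subnet.num_addresses, "addresses, far below the 16,777,216-lease cap")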
-# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot deleted file mode 100644 index 32caf96..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/etc/xorp/config.boot +++ /dev/null @@ -1,25 +0,0 @@ -interfaces { - restore-original-config-on-shutdown: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - description: "Internal pNodes interface" - disable: false - default-system-config - } -} - -protocols { - igmp { - disable: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - disable: false - version: 3 - } - } - traceoptions { - flag all { - disable: false - } - } - } -} diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini deleted file mode 100644 index b394c00..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/l3_agent.ini +++ /dev/null @@ -1,81 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# L3 requires that an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) -# that supports L3 agent -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# If use_namespaces is set as False then the agent can only configure one router. - -# This is done by setting the specific router_id. -# router_id = - -# When external_network_bridge is set, each L3 agent can be associated -# with no more than one external network. This value should be set to the UUID -# of that external network. To allow L3 agent support multiple external -# networks, both the external_network_bridge and gateway_external_network_id -# must be left empty. -# gateway_external_network_id = - -# Indicates that this L3 agent should also handle routers that do not have -# an external network gateway configured. This option should be True only -# for a single agent in a Neutron deployment, and may be False for all agents -# if all routers must have an external network gateway -handle_internal_only_routers = True - -# Name of bridge used for external network traffic. This should be set to -# empty value for the linux bridge. 
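The dnsmasq-neutron.conf above forces DHCP option 26 (interface MTU) to 1454 so that guest packets leave headroom for tunnel encapsulation on a 1500-byte physical network. A sketch under commonly quoted, approximate overheads; on these numbers 1454 suits GRE but would be a few bytes too large for VXLAN:

    ETH_MTU = 1500
    overhead = {"gre": 42, "vxlan": 50}  # approximate encapsulation overheads
    guest_mtu = 1454                     # pushed via dhcp-option-force=26,1454
    for tunnel, extra in overhead.items():
        print(tunnel, "fits:", guest_mtu + extra <= ETH_MTU)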
when this parameter is set, each L3 agent -# can be associated with no more than one external network. -external_network_bridge = br-ex - -# TCP Port used by Neutron metadata server -metadata_port = 9697 - -# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 -# to disable this feature. -send_arp_for_ha = 3 - -# seconds between re-sync routers' data if needed -periodic_interval = 40 - -# seconds to start to sync routers' data after -# starting agent -periodic_fuzzy_delay = 5 - -# enable_metadata_proxy, which is true by default, can be set to False -# if the Nova metadata server is not available -# enable_metadata_proxy = True - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# router_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the L3 agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a router is destroyed. -# router_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini deleted file mode 100644 index 6badf28..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/metadata_agent.ini +++ /dev/null @@ -1,46 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -debug = True - -# The Neutron user information for accessing the Neutron API. -auth_url = http://{{ HA_VIP }}:5000/v2.0 -auth_region = RegionOne -# Turn off verification of the certificate for ssl -# auth_insecure = False -# Certificate Authority public key (CA cert) file for ssl -# auth_ca_cert = -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} - -# Network service endpoint type to pull from the keystone catalog -# endpoint_type = adminURL - -# IP address used by Nova metadata server -nova_metadata_ip = {{ HA_VIP }} - -# TCP Port used by Nova metadata server -nova_metadata_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it must match here and in the configuration used by the Nova Metadata -# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret -metadata_proxy_shared_secret = {{ METADATA_SECRET }} - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# Number of separate worker processes for metadata server -# metadata_workers = 0 - -# Number of backlog requests to configure the metadata server socket with -# metadata_backlog = 128 - -# URL to connect to the cache backend. -# Example of URL using memory caching backend -# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5 -# default_ttl=0 parameter will cause cache entries to never expire. -# Otherwise default_ttl specifies time in seconds a cache entry is valid for. -# No cache is used in case no value is passed. 
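The metadata_proxy_shared_secret above is used to sign the instance ID on proxied metadata requests; nova recomputes the signature with the same secret to verify them. A minimal sketch of a check along those lines (HMAC-SHA256 of the instance ID, hex-encoded); the secret is a placeholder for the templated value:

    import hmac, hashlib

    def sign(secret: str, instance_id: str) -> str:
        return hmac.new(secret.encode(), instance_id.encode(),
                        hashlib.sha256).hexdigest()

    secret = "METADATA_SECRET"   # placeholder for {{ METADATA_SECRET }}
    sig = sign(secret, "instance-uuid")
    assert hmac.compare_digest(sig, sign(secret, "instance-uuid"))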
-# cache_url = diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini deleted file mode 100644 index a790069..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of [::] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of : tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of : tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group. 
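Range specs like the tunnel_id_ranges and vni_ranges values above ("1001:4095") denote an inclusive pool of tenant segment IDs. A small sketch of what such a spec expands to:

    def parse_range(spec: str) -> range:
        lo, hi = (int(x) for x in spec.split(":"))
        return range(lo, hi + 1)   # the upper bound is inclusive

    vnis = parse_range("1001:4095")
    print(len(vnis), "tenant VXLAN segments available")   # 3095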
-# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf deleted file mode 100644 index 93be9cb..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron-network.conf +++ /dev/null @@ -1,465 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. 
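When 'opendaylight' is among the NEUTRON_MECHANISM_DRIVERS, the [ml2_odl] block above points neutron at ODL's northbound REST API with basic auth. A hedged connectivity probe; the host, port and credentials stand in for the templated values, and the "/networks" resource path is an assumption about the northbound API layout:

    import requests

    resp = requests.get(
        "http://10.1.0.15:8080/controller/nb/v2/neutron/networks",
        auth=("admin", "admin"), timeout=10)
    print(resp.status_code)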
-# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. 
-# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. (boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. 
-# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. 
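Unlike the neutron.conf copies elsewhere in this patch, the neutron-network.conf template above keeps the "uuid of the admin nova tenant" comment but drops the nova_admin_tenant_id assignment itself, so nova notifications from a host using this file may fail. A configparser sketch that would flag the omission (interpolation and strictness relaxed, since the rendered file contains '%' characters and empty values):

    import configparser

    cfg = configparser.ConfigParser(interpolation=None, strict=False)
    cfg.read("/etc/neutron/neutron.conf")   # path the roles deploy to
    if not cfg.has_option("DEFAULT", "nova_admin_tenant_id"):
        print("nova_admin_tenant_id is unset; notifications to nova may fail")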
-# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) -# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. 
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf deleted file mode 100644 index 2a66e94..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ VERBOSE }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. 
-# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. -notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update its cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the command directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.)
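The connection option a few lines below points the neutron server at its MySQL database; the string assumes the neutron schema and user already exist on {{ db_host }}. A sketch of that provisioning step with Ansible's stock mysql modules (the host pattern, grant scope and python-mysqldb prerequisite are illustrative, not the repo's actual tasks):

    ---
    - hosts: database
      tasks:
        - name: ensure the MySQLdb bindings the mysql_* modules need
          apt: name=python-mysqldb state=present

        - name: create the neutron database
          mysql_db: name=neutron state=present

        - name: create the neutron DB user, reachable from the other nodes
          mysql_user: name=neutron password={{ NEUTRON_DBPASS }} priv=neutron.*:ALL host=% state=present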
-# connection = sqlite:////var/lib/neutron/neutron.sqlite -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=<service_type>:<name>:<driver>[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of <service type> and <name> must be unique; <driver> must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. -# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf b/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf deleted file mode 100644 index 9587073..0000000 --- a/compass/deploy/ansible/roles/neutron-controller/templates/nova.conf +++ /dev/null @@ -1,69 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE }} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal = false -vif_plugging_timeout = 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = {{ internal_ip }} -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ HA_VIP }} - -[neutron] -url = http://{{ HA_VIP }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml b/compass/deploy/ansible/roles/neutron-network/handlers/main.yml deleted file mode 100644 index d6c5cc8..0000000 --- a/compass/deploy/ansible/roles/neutron-network/handlers/main.yml +++ 
/dev/null @@ -1,21 +0,0 @@ ---- -- name: restart neutron-plugin-openvswitch-agent - service: name=neutron-plugin-openvswitch-agent state=restarted enabled=yes - when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart neutron-l3-agent - service: name=neutron-l3-agent state=restarted enabled=yes - -- name: kill dnsmasq - command: killall dnsmasq - ignore_errors: True - -- name: restart neutron-dhcp-agent - service: name=neutron-dhcp-agent state=restarted enabled=yes - -- name: restart neutron-metadata-agent - service: name=neutron-metadata-agent state=restarted enabled=yes - -- name: restart xorp - service: name=xorp state=restarted enabled=yes sleep=10 - ignore_errors: True diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml b/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml deleted file mode 100644 index d6f38a0..0000000 --- a/compass/deploy/ansible/roles/neutron-network/tasks/igmp-router.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Install XORP to provide IGMP router functionality - apt: pkg=xorp - -- name: configure xorp - template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot - notify: - - restart xorp - -- name: set xorp defaults - lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes - notify: - - restart xorp - -- meta: flush_handlers - -- name: start and enable xorp service - service: name=xorp state=started enabled=yes - retries: 2 - delay: 10 diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml b/compass/deploy/ansible/roles/neutron-network/tasks/main.yml deleted file mode 100644 index 1d4b591..0000000 --- a/compass/deploy/ansible/roles/neutron-network/tasks/main.yml +++ /dev/null @@ -1,114 +0,0 @@ ---- -- name: activate ipv4 forwarding - sysctl: name=net.ipv4.ip_forward value=1 - state=present reload=yes - -- name: deactivate ipv4 rp filter - sysctl: name=net.ipv4.conf.all.rp_filter value=0 - state=present reload=yes - -- name: deactivate ipv4 default rp filter - sysctl: name=net.ipv4.conf.default.rp_filter - value=0 state=present reload=yes - -- name: install neutron network related packages - apt: name={{ item }} state=present force=yes - with_items: - - neutron-plugin-ml2 - - openvswitch-datapath-dkms - - openvswitch-switch - - neutron-l3-agent - - neutron-dhcp-agent - -- name: generate neutron service list - shell: echo {{ item }} >> /opt/service - with_items: - - openvswitch-switch - - neutron-l3-agent - - neutron-dhcp-agent - - neutron-plugin-openvswitch-agent - - neutron-metadata-agent - - xorp - -- name: install neutron openvswitch agent - apt: name=neutron-plugin-openvswitch-agent - state=present force=yes - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: config neutron - template: src=neutron-network.conf - dest=/etc/neutron/neutron.conf backup=yes - notify: - - restart neutron-plugin-openvswitch-agent - - restart neutron-l3-agent - - kill dnsmasq - - restart neutron-dhcp-agent - - restart neutron-metadata-agent - -- name: config l3 agent - template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini - backup=yes - notify: - - restart neutron-l3-agent - -- name: config dhcp agent - template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini - backup=yes - notify: - - kill dnsmasq - - restart neutron-dhcp-agent - -- name: update dnsmasq-neutron.conf - template: src=dnsmasq-neutron.conf - dest=/etc/neutron/dnsmasq-neutron.conf - notify: - - kill dnsmasq - - restart neutron-dhcp-agent - -- name: config metadata agent - template: 
src=metadata_agent.ini - dest=/etc/neutron/metadata_agent.ini backup=yes - notify: - - restart neutron-metadata-agent - -- name: config ml2 plugin - template: src=ml2_conf.ini - dest=/etc/neutron/plugins/ml2/ml2_conf.ini - backup=yes - notify: - - restart neutron-plugin-openvswitch-agent - -- meta: flush_handlers - -- name: add br-int - openvswitch_bridge: bridge=br-int state=present - -- name: add br-ex - openvswitch_bridge: bridge=br-ex state=present - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: assign a port to br-ex for physical ext interface - openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }} - state=present - when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- include: igmp-router.yml - when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" - -- name: assert kernel support for vxlan - command: modinfo -F version vxlan - when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" - -- name: assert iproute2 support for vxlan - command: ip link add type vxlan help - register: iproute_out - failed_when: iproute_out.rc == 255 - when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}" - -- include: odl.yml - when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}" - -- name: restart ovs service - service: name=openvswitch-switch state=restarted enabled=yes - -- meta: flush_handlers diff --git a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml b/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml deleted file mode 100644 index a2b449c..0000000 --- a/compass/deploy/ansible/roles/neutron-network/tasks/odl.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: ovs set manager - command: ovs-vsctl set-manager tcp:{{ controller }}:6640 - -- name: get ovs uuid - shell: ovs-vsctl get Open_vSwitch . _uuid - register: ovs_uuid - -- name: set bridge_mappings - command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }} - -- name: set local ip - command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }} diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini deleted file mode 100644 index 19eb62e..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/dhcp_agent.ini +++ /dev/null @@ -1,90 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# The DHCP agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -resync_interval = 5 - -# The DHCP agent requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Name of Open vSwitch bridge to use -# ovs_integration_bridge = br-int - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires -# no additional setup of the DHCP server.
-dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# The DHCP server can assist with providing metadata support on isolated -# networks. Setting this value to True will cause the DHCP server to append -# specific host routes to the DHCP request. The metadata service will only -# be activated when the subnet does not contain any router port. The guest -# instance must be configured to request host routes via DHCP (Option 121). -enable_isolated_metadata = False - -# Allows for serving metadata requests coming from a dedicated metadata -# access network whose cidr is 169.254.169.254/16 (or larger prefix), and -# is connected to a Neutron router from which the VMs send metadata -# request. In this case DHCP Option 121 will not be injected in VMs, as -# they will be able to reach 169.254.169.254 through a router. -# This option requires enable_isolated_metadata = True -enable_metadata_network = False - -# Number of threads to use during sync process. Should not exceed connection -# pool size configured on server. -# num_sync_threads = 4 - -# Location to store DHCP server config files -# dhcp_confs = $state_path/dhcp - -# Domain to use for building the hostnames -dhcp_domain = openstacklocal - -# Override the default dnsmasq settings with this file -# dnsmasq_config_file = -dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf - -# Comma-separated list of DNS servers which will be used by dnsmasq -# as forwarders. -# dnsmasq_dns_servers = - -# Limit number of leases to prevent a denial-of-service. -dnsmasq_lease_max = 16777216 - -# Location to DHCP lease relay UNIX domain socket -# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# dhcp_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the dhcp agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a dhcp server is disabled. -# dhcp_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. 
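The ovs_vsctl_timeout knob named just below caps how long the agent's ovs-vsctl calls may block; the same --timeout switch exists on the CLI, which is handy when deploy tasks probe Open vSwitch on a host that is still coming up. A sketch of such a guarded probe (an illustrative task, not one of the repo's):

    ---
    - name: probe Open vSwitch, failing fast instead of hanging
      command: ovs-vsctl --timeout=10 show
      register: ovs_probe
      until: ovs_probe.rc == 0
      retries: 3
      delay: 5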
-# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot deleted file mode 100644 index 32caf96..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/etc/xorp/config.boot +++ /dev/null @@ -1,25 +0,0 @@ -interfaces { - restore-original-config-on-shutdown: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - description: "Internal pNodes interface" - disable: false - default-system-config - } -} - -protocols { - igmp { - disable: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - disable: false - version: 3 - } - } - traceoptions { - flag all { - disable: false - } - } - } -} diff --git a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini deleted file mode 100644 index b394c00..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/l3_agent.ini +++ /dev/null @@ -1,81 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# L3 requires that an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) -# that supports L3 agent -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# If use_namespaces is set as False then the agent can only configure one router. - -# This is done by setting the specific router_id. -# router_id = - -# When external_network_bridge is set, each L3 agent can be associated -# with no more than one external network. This value should be set to the UUID -# of that external network. To allow L3 agent support multiple external -# networks, both the external_network_bridge and gateway_external_network_id -# must be left empty. -# gateway_external_network_id = - -# Indicates that this L3 agent should also handle routers that do not have -# an external network gateway configured. This option should be True only -# for a single agent in a Neutron deployment, and may be False for all agents -# if all routers must have an external network gateway -handle_internal_only_routers = True - -# Name of bridge used for external network traffic. This should be set to -# empty value for the linux bridge. 
when this parameter is set, each L3 agent -# can be associated with no more than one external network. -external_network_bridge = br-ex - -# TCP Port used by Neutron metadata server -metadata_port = 9697 - -# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 -# to disable this feature. -send_arp_for_ha = 3 - -# seconds between re-sync routers' data if needed -periodic_interval = 40 - -# seconds to start to sync routers' data after -# starting agent -periodic_fuzzy_delay = 5 - -# enable_metadata_proxy, which is true by default, can be set to False -# if the Nova metadata server is not available -# enable_metadata_proxy = True - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# router_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the L3 agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a router is destroyed. -# router_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini b/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini deleted file mode 100644 index 6badf28..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/metadata_agent.ini +++ /dev/null @@ -1,46 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -debug = True - -# The Neutron user information for accessing the Neutron API. -auth_url = http://{{ HA_VIP }}:5000/v2.0 -auth_region = RegionOne -# Turn off verification of the certificate for ssl -# auth_insecure = False -# Certificate Authority public key (CA cert) file for ssl -# auth_ca_cert = -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} - -# Network service endpoint type to pull from the keystone catalog -# endpoint_type = adminURL - -# IP address used by Nova metadata server -nova_metadata_ip = {{ HA_VIP }} - -# TCP Port used by Nova metadata server -nova_metadata_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it must match here and in the configuration used by the Nova Metadata -# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret -metadata_proxy_shared_secret = {{ METADATA_SECRET }} - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# Number of separate worker processes for metadata server -# metadata_workers = 0 - -# Number of backlog requests to configure the metadata server socket with -# metadata_backlog = 128 - -# URL to connect to the cache backend. -# Example of URL using memory caching backend -# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5 -# default_ttl=0 parameter will cause cache entries to never expire. -# Otherwise default_ttl specifies time in seconds a cache entry is valid for. -# No cache is used in case no value is passed. 
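As the NOTE above stresses, metadata_proxy_shared_secret has to match what nova's metadata server was configured with (nova reads it as neutron_metadata_proxy_shared_secret). Deriving both templates from one variable is the simplest way to keep them in step; a sketch, assuming the METADATA_SECRET variable this template already consumes (the lookup path is arbitrary and the task is illustrative):

    ---
    # Generate the secret once on the deploy host and reuse it wherever the
    # nova and neutron templates are rendered.
    - name: define a single shared secret for nova and neutron metadata
      set_fact: METADATA_SECRET="{{ lookup('password', '/tmp/metadata_secret chars=ascii_letters,digits length=32') }}"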
-# cache_url = diff --git a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini b/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini deleted file mode 100644 index a790069..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/ml2_conf.ini +++ /dev/null @@ -1,108 +0,0 @@ -[ml2] -# (ListOpt) List of network type driver entrypoints to be loaded from -# the neutron.ml2.type_drivers namespace. -# -# type_drivers = local,flat,vlan,gre,vxlan -# Example: type_drivers = flat,vlan,gre,vxlan -type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }} - -# (ListOpt) Ordered list of network_types to allocate as tenant -# networks. The default value 'local' is useful for single-box testing -# but provides no connectivity between hosts. -# -# tenant_network_types = local -# Example: tenant_network_types = vlan,gre,vxlan -tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }} - -# (ListOpt) Ordered list of networking mechanism driver entrypoints -# to be loaded from the neutron.ml2.mechanism_drivers namespace. -# mechanism_drivers = -# Example: mechanism_drivers = openvswitch,mlnx -# Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger -# Example: mechanism_drivers = openvswitch,brocade -# Example: mechanism_drivers = linuxbridge,brocade -mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }} - -[ml2_type_flat] -# (ListOpt) List of physical_network names with which flat networks -# can be created. Use * to allow flat networks with arbitrary -# physical_network names. -# -flat_networks = external -# Example:flat_networks = physnet1,physnet2 -# Example:flat_networks = * - -[ml2_type_vlan] -# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples -# specifying physical_network names usable for VLAN provider and -# tenant networks, as well as ranges of VLAN tags on each -# physical_network available for allocation as tenant networks. -# -network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999,physnet2 - -[ml2_type_gre] -# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation -tunnel_id_ranges = 1:1000 - -[ml2_type_vxlan] -# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating -# ranges of VXLAN VNI IDs that are available for tenant network allocation. -# -vni_ranges = 1001:4095 - -# (StrOpt) Multicast group for the VXLAN interface. When configured, will -# enable sending all broadcast traffic to this multicast group. When left -# unconfigured, will disable multicast VXLAN mode. -# -vxlan_group = 239.1.1.1 -# Example: vxlan_group = 239.1.1.1 - -[securitygroup] -# Controls if neutron security group is enabled or not. -# It should be false when you use nova security group.
-# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf deleted file mode 100644 index 93be9cb..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/neutron-network.conf +++ /dev/null @@ -1,465 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not use_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port to bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name.
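The ml2_conf.ini template above expects list-valued variables and flattens them with |join(","); a sketch of the group_vars shapes it assumes (the deleted group_vars/all carried the real defaults, so these values are purely illustrative). The core_plugin entrypoint named just below then selects the ML2 plugin that reads this file:

    ---
    # Illustrative group_vars: each list feeds a |join(",") filter in ml2_conf.ini.
    NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']   # -> type_drivers = flat,gre,vxlan
    NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']          # -> tenant_network_types = vxlan
    NEUTRON_MECHANISM_DRIVERS: ['openvswitch']       # -> mechanism_drivers = openvswitch
    NEUTRON_TUNNEL_TYPES: ['vxlan']                  # -> tunnel_types = vxlan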
-# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4th octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation.
-# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. (boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. 
-# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
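The neutron-nova options that follow authenticate against nova's admin API; in the controller copy of this template, nova_admin_tenant_id is filled from NOVA_ADMIN_TENANT_ID.stdout_lines[0], which implies a shell result registered earlier in the play. A sketch of how such a value could be captured with the Juno-era keystone CLI (the task wording and awk extraction are illustrative; ADMIN_PASS and HA_VIP are variables the repo already uses). The notify flags just below then switch these events on:

    ---
    - name: look up the service tenant id for the nova/neutron handshake
      shell: keystone --os-username=admin --os-password={{ ADMIN_PASS }}
             --os-tenant-name=admin --os-auth-url=http://{{ HA_VIP }}:35357/v2.0
             tenant-get service | awk '/ id /{print $4}'
      register: NOVA_ADMIN_TENANT_ID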
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update its cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on OpenStack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility.
-# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) -# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. 
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default -# Uncomment the line below to use Embrane heleos as Load Balancer service provider. -# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf b/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf deleted file mode 100644 index 1575367..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/neutron.conf +++ /dev/null @@ -1,466 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not use_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port to bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. -# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4th octet is not 00, it will also be used. The others will be -# randomly generated.
-# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. -notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant -nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. 
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. -# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) 
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option; example for the default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of a non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute "default" from the line below.
-# Otherwise comment out the HA Proxy line.
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# Uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default diff --git a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh b/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh deleted file mode 100644 index b92e202..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/neutron_init.sh +++ /dev/null @@ -1,4 +0,0 @@ -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True - -# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END}} --disable-dhcp --gateway {{EXTERNAL_NETWORK_GATEWAY}} {{EXTERNAL_NETWORK_CIDR}} - diff --git a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf b/compass/deploy/ansible/roles/neutron-network/templates/nova.conf deleted file mode 100644 index 9587073..0000000 --- a/compass/deploy/ansible/roles/neutron-network/templates/nova.conf +++ /dev/null @@ -1,69 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = {{ internal_ip }} -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ HA_VIP }} - -[neutron] -url = http://{{ HA_VIP }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml b/compass/deploy/ansible/roles/nova-compute/handlers/main.yml deleted file mode 100644 index c135003..0000000 --- a/compass/deploy/ansible/roles/nova-compute/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- 
-- name: restart nova-compute - service: name=nova-compute state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml b/compass/deploy/ansible/roles/nova-compute/tasks/main.yml deleted file mode 100644 index 51c8dfa..0000000 --- a/compass/deploy/ansible/roles/nova-compute/tasks/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: install nova-compute related packages - apt: name=nova-compute-kvm state=present force=yes - -- name: update nova-compute conf - template: src={{ item }} dest=/etc/nova/{{ item }} - with_items: - - nova.conf - - nova-compute.conf - notify: - - restart nova-compute - -- name: generate neutron controll service list - shell: echo {{ item }} >> /opt/service - with_items: - - nova-compute - -- meta: flush_handlers - -- name: remove nova sqlite db - shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf deleted file mode 100644 index 401dee7..0000000 --- a/compass/deploy/ansible/roles/nova-compute/templates/nova-compute.conf +++ /dev/null @@ -1,7 +0,0 @@ -[DEFAULT] -compute_driver=libvirt.LibvirtDriver -force_raw_images = true -[libvirt] -virt_type=qemu -images_type = raw -mem_stats_period_seconds=0 diff --git a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf b/compass/deploy/ansible/roles/nova-compute/templates/nova.conf deleted file mode 100644 index 4988cb0..0000000 --- a/compass/deploy/ansible/roles/nova-compute/templates/nova.conf +++ /dev/null @@ -1,73 +0,0 @@ -[DEFAULT] -dhcpbridge_flagfile=/etc/nova/nova.conf -dhcpbridge=/usr/bin/nova-dhcpbridge -logdir=/var/log/nova -state_path=/var/lib/nova -lock_path=/var/lock/nova -force_dhcp_release=True -iscsi_helper=tgtadm -libvirt_use_virtio_for_bridges=True -connection_type=libvirt -root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf -verbose={{ VERBOSE}} -debug={{ DEBUG }} -ec2_private_dns_show_ip=True -api_paste_config=/etc/nova/api-paste.ini -volumes_path=/var/lib/nova/volumes -enabled_apis=ec2,osapi_compute,metadata - -vif_plugging_is_fatal: false -vif_plugging_timeout: 0 - -auth_strategy = keystone - -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_userid = {{ RABBIT_USER }} -rabbit_password = {{ RABBIT_PASS }} - -my_ip = {{ internal_ip }} -vnc_enabled = True -vncserver_listen = {{ internal_ip }} -vncserver_proxyclient_address = {{ internal_ip }} -novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html - -novncproxy_host = {{ internal_ip }} -novncproxy_port = 6080 - -network_api_class = nova.network.neutronv2.api.API -linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver -firewall_driver = nova.virt.firewall.NoopFirewallDriver -security_group_api = neutron - -instance_usage_audit = True -instance_usage_audit_period = hour -notify_on_state_change = vm_and_task_state -notification_driver = nova.openstack.common.notifier.rpc_notifier -notification_driver = ceilometer.compute.nova_notifier - -[database] -# The SQLAlchemy connection string used to connect to the database -connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova - -[conductor] -manager = nova.conductor.manager.ConductorManager -topic = conductor - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = nova -admin_password = {{ NOVA_PASS }} - -[glance] -host = {{ HA_VIP }} - -[neutron] -url = 
http://{{ HA_VIP }}:9696 -auth_strategy = keystone -admin_tenant_name = service -admin_username = neutron -admin_password = {{ NEUTRON_PASS }} -admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 diff --git a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml b/compass/deploy/ansible/roles/nova-controller/handlers/main.yml deleted file mode 100644 index b4c1585..0000000 --- a/compass/deploy/ansible/roles/nova-controller/handlers/main.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: restart nova-api - service: name=nova-api state=restarted enabled=yes - -- name: restart nova-cert - service: name=nova-cert state=restarted enabled=yes - -- name: restart nova-consoleauth - service: name=nova-consoleauth state=restarted enabled=yes - -- name: restart nova-scheduler - service: name=nova-scheduler state=restarted enabled=yes - -- name: restart nova-conductor - service: name=nova-conductor state=restarted enabled=yes - -- name: restart nova-novncproxy - service: name=nova-novncproxy state=restarted enabled=yes - -- name: remove nova-sqlite-db - shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed - -- name: restart neutron-server - service: name=neutron-server state=restarted enabled=yes diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml b/compass/deploy/ansible/roles/nova-controller/tasks/main.yml deleted file mode 100644 index 72a9f4d..0000000 --- a/compass/deploy/ansible/roles/nova-controller/tasks/main.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- include: nova_install.yml - tags: - - install - - nova_install - - nova - -- include: nova_config.yml - when: HA_CLUSTER is not defined or HA_CLUSTER[inventory_hostname] == '' - tags: - - config - - nova_config - - nova diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml deleted file mode 100644 index 62351fa..0000000 --- a/compass/deploy/ansible/roles/nova-controller/tasks/nova_config.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: nova db sync - command: su -s /bin/sh -c "nova-manage db sync" nova - register: result - until: result.rc == 0 - retries: 5 - delay: 3 - notify: - - restart nova-api - - restart nova-cert - - restart nova-consoleauth - - restart nova-scheduler - - restart nova-conductor - - restart nova-novncproxy - -- meta: flush_handlers diff --git a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml b/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml deleted file mode 100644 index a1cded5..0000000 --- a/compass/deploy/ansible/roles/nova-controller/tasks/nova_install.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: install nova related packages - apt: name={{ item }} state=present force=yes - with_items: - - nova-api - - nova-cert - - nova-conductor - - nova-consoleauth - - nova-novncproxy - - nova-scheduler - - python-novaclient - - python-oslo.rootwrap - -- name: generate nova controll service list - shell: echo {{ item }} >> /opt/service - with_items: - - nova-api - - nova-cert - - nova-conductor - - nova-consoleauth - - nova-novncproxy - - nova-scheduler - -- name: update nova conf - template: src=nova.conf - dest=/etc/nova/nova.conf - backup=yes - notify: - - restart nova-api - - restart nova-cert - - restart nova-consoleauth - - restart nova-scheduler - - restart nova-conductor - - restart nova-novncproxy - - remove nova-sqlite-db diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini 
b/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini deleted file mode 100644 index 19eb62e..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/dhcp_agent.ini +++ /dev/null @@ -1,90 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# The DHCP agent will resync its state with Neutron to recover from any -# transient notification or rpc errors. The interval is number of -# seconds between attempts. -resync_interval = 5 - -# The DHCP agent requires an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP, -# BigSwitch/Floodlight) -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Name of Open vSwitch bridge to use -# ovs_integration_bridge = br-int - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires -# no additional setup of the DHCP server. -dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# The DHCP server can assist with providing metadata support on isolated -# networks. Setting this value to True will cause the DHCP server to append -# specific host routes to the DHCP request. The metadata service will only -# be activated when the subnet does not contain any router port. The guest -# instance must be configured to request host routes via DHCP (Option 121). -enable_isolated_metadata = False - -# Allows for serving metadata requests coming from a dedicated metadata -# access network whose cidr is 169.254.169.254/16 (or larger prefix), and -# is connected to a Neutron router from which the VMs send metadata -# request. In this case DHCP Option 121 will not be injected in VMs, as -# they will be able to reach 169.254.169.254 through a router. -# This option requires enable_isolated_metadata = True -enable_metadata_network = False - -# Number of threads to use during sync process. Should not exceed connection -# pool size configured on server. -# num_sync_threads = 4 - -# Location to store DHCP server config files -# dhcp_confs = $state_path/dhcp - -# Domain to use for building the hostnames -dhcp_domain = openstacklocal - -# Override the default dnsmasq settings with this file -# dnsmasq_config_file = -dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf - -# Comma-separated list of DNS servers which will be used by dnsmasq -# as forwarders. -# dnsmasq_dns_servers = - -# Limit number of leases to prevent a denial-of-service. -dnsmasq_lease_max = 16777216 - -# Location to DHCP lease relay UNIX domain socket -# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# dhcp_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the dhcp agent. 
-# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a dhcp server is disabled. -# dhcp_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf deleted file mode 100644 index 7bcbd9d..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/dnsmasq-neutron.conf +++ /dev/null @@ -1,2 +0,0 @@ -dhcp-option-force=26,1454 - diff --git a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot b/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot deleted file mode 100644 index 32caf96..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/etc/xorp/config.boot +++ /dev/null @@ -1,25 +0,0 @@ -interfaces { - restore-original-config-on-shutdown: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - description: "Internal pNodes interface" - disable: false - default-system-config - } -} - -protocols { - igmp { - disable: false - interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} { - disable: false - version: 3 - } - } - traceoptions { - flag all { - disable: false - } - } - } -} diff --git a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini deleted file mode 100644 index b394c00..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/l3_agent.ini +++ /dev/null @@ -1,81 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -# debug = False -verbose = True - -# L3 requires that an interface driver be set. Choose the one that best -# matches your plugin. -# interface_driver = - -# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC) -# that supports L3 agent -# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver -interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver - -# Use veth for an OVS interface or not. -# Support kernels with limited namespace support -# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True. -# ovs_use_veth = False - -# Example of interface_driver option for LinuxBridge -# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver - -# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and -# iproute2 package that supports namespaces). -use_namespaces = True - -# If use_namespaces is set as False then the agent can only configure one router. - -# This is done by setting the specific router_id. -# router_id = - -# When external_network_bridge is set, each L3 agent can be associated -# with no more than one external network. This value should be set to the UUID -# of that external network. To allow L3 agent support multiple external -# networks, both the external_network_bridge and gateway_external_network_id -# must be left empty. 
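A minimal sketch of the multi-external-network variant the comment above describes: per that comment, it is simply both options left empty, in contrast to the single br-ex binding this template sets via external_network_bridge further down (the gateway_external_network_id line that follows is the option in question):

    external_network_bridge =
    gateway_external_network_id =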
-# gateway_external_network_id = - -# Indicates that this L3 agent should also handle routers that do not have -# an external network gateway configured. This option should be True only -# for a single agent in a Neutron deployment, and may be False for all agents -# if all routers must have an external network gateway -handle_internal_only_routers = True - -# Name of bridge used for external network traffic. This should be set to -# empty value for the linux bridge. when this parameter is set, each L3 agent -# can be associated with no more than one external network. -external_network_bridge = br-ex - -# TCP Port used by Neutron metadata server -metadata_port = 9697 - -# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0 -# to disable this feature. -send_arp_for_ha = 3 - -# seconds between re-sync routers' data if needed -periodic_interval = 40 - -# seconds to start to sync routers' data after -# starting agent -periodic_fuzzy_delay = 5 - -# enable_metadata_proxy, which is true by default, can be set to False -# if the Nova metadata server is not available -# enable_metadata_proxy = True - -# Location of Metadata Proxy UNIX domain socket -# metadata_proxy_socket = $state_path/metadata_proxy - -# router_delete_namespaces, which is false by default, can be set to True if -# namespaces can be deleted cleanly on the host running the L3 agent. -# Do not enable this until you understand the problem with the Linux iproute -# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and -# you are sure that your version of iproute does not suffer from the problem. -# If True, namespaces will be deleted when a router is destroyed. -# router_delete_namespaces = False - -# Timeout for ovs-vsctl commands. -# If the timeout expires, ovs commands will fail with ALARMCLOCK error. -# ovs_vsctl_timeout = 10 diff --git a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini b/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini deleted file mode 100644 index 6badf28..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/metadata_agent.ini +++ /dev/null @@ -1,46 +0,0 @@ -[DEFAULT] -# Show debugging output in log (sets DEBUG log level output) -debug = True - -# The Neutron user information for accessing the Neutron API. -auth_url = http://{{ HA_VIP }}:5000/v2.0 -auth_region = RegionOne -# Turn off verification of the certificate for ssl -# auth_insecure = False -# Certificate Authority public key (CA cert) file for ssl -# auth_ca_cert = -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} - -# Network service endpoint type to pull from the keystone catalog -# endpoint_type = adminURL - -# IP address used by Nova metadata server -nova_metadata_ip = {{ HA_VIP }} - -# TCP Port used by Nova metadata server -nova_metadata_port = 8775 - -# When proxying metadata requests, Neutron signs the Instance-ID header with a -# shared secret to prevent spoofing. You may select any string for a secret, -# but it must match here and in the configuration used by the Nova Metadata -# Server. 
-# NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
-metadata_proxy_shared_secret = {{ METADATA_SECRET }}
-
-# Location of Metadata Proxy UNIX domain socket
-# metadata_proxy_socket = $state_path/metadata_proxy
-
-# Number of separate worker processes for metadata server
-# metadata_workers = 0
-
-# Number of backlog requests to configure the metadata server socket with
-# metadata_backlog = 128
-
-# URL to connect to the cache backend.
-# Example of URL using memory caching backend
-# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
-# default_ttl=0 parameter will cause cache entries to never expire.
-# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
-# No cache is used in case no value is passed.
-# cache_url =
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini b/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
deleted file mode 100644
index a790069..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/ml2_conf.ini
+++ /dev/null
@@ -1,108 +0,0 @@
-[ml2]
-# (ListOpt) List of network type driver entrypoints to be loaded from
-# the neutron.ml2.type_drivers namespace.
-#
-# type_drivers = local,flat,vlan,gre,vxlan
-# Example: type_drivers = flat,vlan,gre,vxlan
-type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
-
-# (ListOpt) Ordered list of network_types to allocate as tenant
-# networks. The default value 'local' is useful for single-box testing
-# but provides no connectivity between hosts.
-#
-# tenant_network_types = local
-# Example: tenant_network_types = vlan,gre,vxlan
-tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
-
-# (ListOpt) Ordered list of networking mechanism driver entrypoints
-# to be loaded from the neutron.ml2.mechanism_drivers namespace.
-# mechanism_drivers =
-# Example: mechanism_drivers = openvswitch,mlnx
-# Example: mechanism_drivers = arista
-# Example: mechanism_drivers = cisco,logger
-# Example: mechanism_drivers = openvswitch,brocade
-# Example: mechanism_drivers = linuxbridge,brocade
-mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
-
-[ml2_type_flat]
-# (ListOpt) List of physical_network names with which flat networks
-# can be created. Use * to allow flat networks with arbitrary
-# physical_network names.
-#
-flat_networks = external
-# Example:flat_networks = physnet1,physnet2
-# Example:flat_networks = *
-
-[ml2_type_vlan]
-# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
-# specifying physical_network names usable for VLAN provider and
-# tenant networks, as well as ranges of VLAN tags on each
-# physical_network available for allocation as tenant networks.
-#
-network_vlan_ranges =
-# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
-
-[ml2_type_gre]
-# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
-tunnel_id_ranges = 1:1000
-
-[ml2_type_vxlan]
-# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
-# ranges of VXLAN VNI IDs that are available for tenant network allocation.
-#
-vni_ranges = 1001:4095
-
-# (StrOpt) Multicast group for the VXLAN interface. When configured, will
-# enable sending all broadcast traffic to this multicast group. When left
-# unconfigured, will disable multicast VXLAN mode.
-#
-vxlan_group = 239.1.1.1
-# Example: vxlan_group = 239.1.1.1
-
-[securitygroup]
-# Controls if neutron security group is enabled or not.
-# It should be false when you use nova security group.
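For cross-reference: when neutron security groups are enabled here (see the enable_security_group lines immediately below), nova must delegate security-group handling to neutron. The nova.conf templates deleted earlier in this patch carry the matching pair:

    security_group_api = neutron
    firewall_driver = nova.virt.firewall.NoopFirewallDriver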
-# enable_security_group = True -firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver -enable_security_group = True - -[database] -connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron?charset=utf8 - -[ovs] -local_ip = {{ internal_ip }} -{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %} -integration_bridge = br-int -tunnel_bridge = br-tun -tunnel_id_ranges = 1001:4095 -tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }} -{% endif %} - -[agent] -root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf -tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }} -{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %} -vxlan_udp_port = 4789 -{% endif %} -l2_population = False - -[odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -network_vlan_ranges = 1001:4095 -tunnel_id_ranges = 1001:4095 -tun_peer_patch_port = patch-int -int_peer_patch_port = patch-tun -tenant_network_type = vxlan -tunnel_bridge = br-tun -integration_bridge = br-int -controllers = 10.1.0.15:8080:admin:admin -{% endif %} - -[ml2_odl] -{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %} -username = {{ odl_username }} -password = {{ odl_password }} -url = http://{{ controller }}:{{ odl_api_port }}/controller/nb/v2/neutron -{% endif %} - diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf deleted file mode 100644 index 93be9cb..0000000 --- a/compass/deploy/ansible/roles/nova-controller/templates/neutron-network.conf +++ /dev/null @@ -1,465 +0,0 @@ -[DEFAULT] -# Print more verbose output (set logging level to INFO instead of default WARNING level). -verbose = {{ VERBOSE }} - -# Print debugging output (set logging level to DEBUG instead of default WARNING level). -debug = {{ DEBUG }} - -# Where to store Neutron state files. This directory must be writable by the -# user executing the agent. -state_path = /var/lib/neutron - -# Where to store lock files -lock_path = $state_path/lock - -# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s -# log_date_format = %Y-%m-%d %H:%M:%S - -# use_syslog -> syslog -# log_file and log_dir -> log_dir/log_file -# (not log_file) and log_dir -> log_dir/{binary_name}.log -# use_stderr -> stderr -# (not user_stderr) and (not log_file) -> stdout -# publish_errors -> notification system - -# use_syslog = False -# syslog_log_facility = LOG_USER - -# use_stderr = True -# log_file = -log_dir = /var/log/neutron - -# publish_errors = False - -# Address to bind the API server to -bind_host = {{ network_server_host }} - -# Port the bind the API server to -bind_port = 9696 - -# Path to the extensions. Note that this can be a colon-separated list of -# paths. For example: -# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions -# The __path__ of neutron.extensions is appended to this, so if your -# extensions are in there you don't need to specify them here -# api_extensions_path = - -# (StrOpt) Neutron core plugin entrypoint to be loaded from the -# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the -# plugins included in the neutron source distribution. For compatibility with -# previous versions, the class name of a plugin can be specified instead of its -# entrypoint name. 
-# -#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin -core_plugin = ml2 -# Example: core_plugin = ml2 - -# (ListOpt) List of service plugin entrypoints to be loaded from the -# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of -# the plugins included in the neutron source distribution. For compatibility -# with previous versions, the class name of a plugin can be specified instead -# of its entrypoint name. -# -# service_plugins = -# Example: service_plugins = router,firewall,lbaas,vpnaas,metering -service_plugins = router - -# Paste configuration file -api_paste_config = api-paste.ini - -# The strategy to be used for auth. -# Supported values are 'keystone'(default), 'noauth'. -auth_strategy = keystone - -# Base MAC address. The first 3 octets will remain unchanged. If the -# 4h octet is not 00, it will also be used. The others will be -# randomly generated. -# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. 
-# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. (boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. 
-# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted -# networks to first DHCP agent which sends get_active_networks message to -# neutron server -# network_auto_schedule = True - -# Allow auto scheduling routers to L3 agent. It will schedule non-hosted -# routers to first L3 agent which sends sync_routers message to neutron server -# router_auto_schedule = True - -# Number of DHCP agents scheduled to host a network. This enables redundant -# DHCP agents for configured networks. -# dhcp_agents_per_network = 1 - -# =========== end of items for agent scheduler extension ===== - -# =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -api_workers = 8 - -# Number of separate RPC worker processes to spawn. The default, 0, runs the -# worker thread in the current process. Greater than 0 launches that number of -# child processes as RPC workers. The parent process manages them. -# This feature is experimental until issues are addressed and testing has been -# enabled for various plugins for compatibility. -rpc_workers = 8 - -# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when -# starting API server. Not supported on OS X. -# tcp_keepidle = 600 - -# Number of seconds to keep retrying to listen -# retry_until_window = 30 - -# Number of backlog requests to configure the socket with. -# backlog = 4096 - -# Max header line to accommodate large tokens -# max_header_line = 16384 - -# Enable SSL on the API server -# use_ssl = False - -# Certificate file to use when starting API server securely -# ssl_cert_file = /path/to/certfile - -# Private key file to use when starting API server securely -# ssl_key_file = /path/to/keyfile - -# CA certificate file to use when starting API server securely to -# verify connecting clients. This is an optional parameter only required if -# API clients need to authenticate to the API server using SSL certificates -# signed by a trusted CA -# ssl_ca_file = /path/to/cafile -# ======== end of WSGI parameters related to the API server ========== - - -# ======== neutron nova interactions ========== -# Send notification to nova when port status is active. 
-notify_nova_on_port_status_changes = True - -# Send notifications to nova when port data (fixed_ips/floatingips) change -# so nova can update it's cache. -notify_nova_on_port_data_changes = True - -# URL for connection to nova (Only supports one nova region currently). -nova_url = http://{{ HA_VIP }}:8774/v2 - -# Name of nova region to use. Useful if keystone manages more than one region -nova_region_name = RegionOne - -# Username for connection to nova in admin context -nova_admin_username = nova - -# The uuid of the admin nova tenant - -# Password for connection to nova in admin context. -nova_admin_password = {{ NOVA_PASS }} - -# Authorization URL for connection to nova in admin context. -nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0 - -# Number of seconds between sending events to nova if there are any events to send -send_events_interval = 2 - -# ======== end of neutron nova interactions ========== - -[quotas] -# Default driver to use for quota checks -quota_driver = neutron.db.quota_db.DbQuotaDriver - -# Resource name(s) that are supported in quota features -quota_items = network,subnet,port - -# Default number of resource allowed per tenant. A negative value means -# unlimited. -default_quota = -1 - -# Number of networks allowed per tenant. A negative value means unlimited. -quota_network = 100 - -# Number of subnets allowed per tenant. A negative value means unlimited. -quota_subnet = 100 - -# Number of ports allowed per tenant. A negative value means unlimited. -quota_port = 8000 - -# Number of security groups allowed per tenant. A negative value means -# unlimited. -quota_security_group = 1000 - -# Number of security group rules allowed per tenant. A negative value means -# unlimited. -quota_security_group_rule = 1000 - -# Number of vips allowed per tenant. A negative value means unlimited. -# quota_vip = 10 - -# Number of pools allowed per tenant. A negative value means unlimited. -# quota_pool = 10 - -# Number of pool members allowed per tenant. A negative value means unlimited. -# The default is unlimited because a member is not a real resource consumer -# on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_member = -1 - -# Number of health monitors allowed per tenant. A negative value means -# unlimited. -# The default is unlimited because a health monitor is not a real resource -# consumer on Openstack. However, on back-end, a member is a resource consumer -# and that is the reason why quota is possible. -# quota_health_monitors = -1 - -# Number of routers allowed per tenant. A negative value means unlimited. -# quota_router = 10 - -# Number of floating IPs allowed per tenant. A negative value means unlimited. -# quota_floatingip = 50 - -[agent] -# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real -# root filter facility. 
-# Change to "sudo" to skip the filtering and just run the comand directly -root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf" - -# =========== items for agent management extension ============= -# seconds between nodes reporting state to server; should be less than -# agent_down_time, best if it is half or less than agent_down_time -report_interval = 30 - -# =========== end of items for agent management extension ===== - -[keystone_authtoken] -auth_uri = http://{{ HA_VIP }}:5000/v2.0 -identity_uri = http://{{ HA_VIP }}:35357 -admin_tenant_name = service -admin_user = neutron -admin_password = {{ NEUTRON_PASS }} -signing_dir = $state_path/keystone-signing - -[database] -# This line MUST be changed to actually run the plugin. -# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron -# Replace 127.0.0.1 above with the IP address of the database used by the -# main neutron server. (Leave it as is if the database runs on this host.) -# connection = sqlite:////var/lib/neutron/neutron.sqlite -#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron - -# The SQLAlchemy connection string used to connect to the slave database -slave_connection = - -# Database reconnection retry times - in event connectivity is lost -# set to -1 implies an infinite retry count -max_retries = 10 - -# Database reconnection interval in seconds - if the initial connection to the -# database fails -retry_interval = 10 - -# Minimum number of SQL connections to keep open in a pool -min_pool_size = 1 - -# Maximum number of SQL connections to keep open in a pool -max_pool_size = 100 - -# Timeout in seconds before idle sql connections are reaped -idle_timeout = 3600 - -# If set, use this value for max_overflow with sqlalchemy -max_overflow = 100 - -# Verbosity of SQL debugging information. 0=None, 100=Everything -connection_debug = 0 - -# Add python stack traces to SQL as comment strings -connection_trace = False - -# If set, use this value for pool_timeout with sqlalchemy -pool_timeout = 10 - -[service_providers] -# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall. -# Must be in form: -# service_provider=::[:default] -# List of allowed service types includes LOADBALANCER, FIREWALL, VPN -# Combination of and must be unique; must also be unique -# This is multiline option, example for default provider: -# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default -# example of non-default provider: -# service_provider=FIREWALL:name2:firewall_driver_path -# --- Reference implementations --- -service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default -service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default -# In order to activate Radware's lbaas driver you need to uncomment the next line. -# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below. -# Otherwise comment the HA Proxy line -# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default -# uncomment the following line to make the 'netscaler' LBaaS provider available. -# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver -# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver. 
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf b/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
deleted file mode 100644
index 1575367..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/neutron.conf
+++ /dev/null
@@ -1,466 +0,0 @@
-[DEFAULT]
-# Print more verbose output (set logging level to INFO instead of default WARNING level).
-verbose = {{ VERBOSE }}
-
-# Print debugging output (set logging level to DEBUG instead of default WARNING level).
-debug = {{ VERBOSE }}
-
-# Where to store Neutron state files. This directory must be writable by the
-# user executing the agent.
-state_path = /var/lib/neutron
-
-# Where to store lock files
-lock_path = $state_path/lock
-
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
-# log_date_format = %Y-%m-%d %H:%M:%S
-
-# use_syslog -> syslog
-# log_file and log_dir -> log_dir/log_file
-# (not log_file) and log_dir -> log_dir/{binary_name}.log
-# use_stderr -> stderr
-# (not use_stderr) and (not log_file) -> stdout
-# publish_errors -> notification system
-
-# use_syslog = False
-# syslog_log_facility = LOG_USER
-
-# use_stderr = True
-# log_file =
-log_dir = /var/log/neutron
-
-# publish_errors = False
-
-# Address to bind the API server to
-bind_host = {{ network_server_host }}
-
-# Port for the API server to bind to
-bind_port = 9696
-
-# Path to the extensions. Note that this can be a colon-separated list of
-# paths. For example:
-# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
-# The __path__ of neutron.extensions is appended to this, so if your
-# extensions are in there you don't need to specify them here
-# api_extensions_path =
-
-# (StrOpt) Neutron core plugin entrypoint to be loaded from the
-# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
-# plugins included in the neutron source distribution. For compatibility with
-# previous versions, the class name of a plugin can be specified instead of its
-# entrypoint name.
-#
-#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
-core_plugin = ml2
-# Example: core_plugin = ml2
-
-# (ListOpt) List of service plugin entrypoints to be loaded from the
-# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
-# the plugins included in the neutron source distribution. For compatibility
-# with previous versions, the class name of a plugin can be specified instead
-# of its entrypoint name.
-#
-# service_plugins =
-# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
-service_plugins = router
-
-# Paste configuration file
-api_paste_config = api-paste.ini
-
-# The strategy to be used for auth.
-# Supported values are 'keystone'(default), 'noauth'.
-auth_strategy = keystone
-
-# Base MAC address. The first 3 octets will remain unchanged. If the
-# 4th octet is not 00, it will also be used. The others will be
-# randomly generated.
-# 3 octet -# base_mac = fa:16:3e:00:00:00 -# 4 octet -# base_mac = fa:16:3e:4f:00:00 - -# Maximum amount of retries to generate a unique MAC address -# mac_generation_retries = 16 - -# DHCP Lease duration (in seconds) -dhcp_lease_duration = 86400 - -# Allow sending resource operation notification to DHCP agent -# dhcp_agent_notification = True - -# Enable or disable bulk create/update/delete operations -# allow_bulk = True -# Enable or disable pagination -# allow_pagination = False -# Enable or disable sorting -# allow_sorting = False -# Enable or disable overlapping IPs for subnets -# Attention: the following parameter MUST be set to False if Neutron is -# being used in conjunction with nova security groups -allow_overlapping_ips = True -# Ensure that configured gateway is on subnet -# force_gateway_on_subnet = False - - -# RPC configuration options. Defined in rpc __init__ -# The messaging module to use, defaults to kombu. -# rpc_backend = neutron.openstack.common.rpc.impl_kombu -rpc_backend = rabbit -rabbit_host = {{ rabbit_host }} -rabbit_password = {{ RABBIT_PASS }} - -# Size of RPC thread pool -rpc_thread_pool_size = 240 -# Size of RPC connection pool -rpc_conn_pool_size = 100 -# Seconds to wait for a response from call or multicall -rpc_response_timeout = 300 -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -rpc_cast_timeout = 300 -# Modules of exceptions that are permitted to be recreated -# upon receiving exception data from an rpc call. -# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception -# AMQP exchange to connect to if using RabbitMQ or QPID -# control_exchange = neutron - -# If passed, use a fake RabbitMQ provider -# fake_rabbit = False - -# Configuration options if sending notifications via kombu rpc (these are -# the defaults) -# SSL version to use (valid only if SSL enabled) -# kombu_ssl_version = -# SSL key file (valid only if SSL enabled) -# kombu_ssl_keyfile = -# SSL cert file (valid only if SSL enabled) -# kombu_ssl_certfile = -# SSL certification authority file (valid only if SSL enabled) -# kombu_ssl_ca_certs = -# Port where RabbitMQ server is running/listening -rabbit_port = 5672 -# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port' -# rabbit_hosts = localhost:5672 -# User ID used for RabbitMQ connections -rabbit_userid = {{ RABBIT_USER }} -# Location of a virtual RabbitMQ installation. -# rabbit_virtual_host = / -# Maximum retries with trying to connect to RabbitMQ -# (the default of 0 implies an infinite retry count) -# rabbit_max_retries = 0 -# RabbitMQ connection retry interval -# rabbit_retry_interval = 1 -# Use HA queues in RabbitMQ (x-ha-policy: all). You need to -# wipe RabbitMQ database when changing this option. 
(boolean value) -# rabbit_ha_queues = false -# QPID -# rpc_backend=neutron.openstack.common.rpc.impl_qpid -# Qpid broker hostname -# qpid_hostname = localhost -# Qpid broker port -# qpid_port = 5672 -# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672) -# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port' -# qpid_hosts = localhost:5672 -# Username for qpid connection -# qpid_username = '' -# Password for qpid connection -# qpid_password = '' -# Space separated list of SASL mechanisms to use for auth -# qpid_sasl_mechanisms = '' -# Seconds between connection keepalive heartbeats -# qpid_heartbeat = 60 -# Transport to use, either 'tcp' or 'ssl' -# qpid_protocol = tcp -# Disable Nagle algorithm -# qpid_tcp_nodelay = True - -# ZMQ -# rpc_backend=neutron.openstack.common.rpc.impl_zmq -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. -# rpc_zmq_bind_address = * - -# ============ Notification System Options ===================== - -# Notifications can be sent when network/subnet/port are created, updated or deleted. -# There are three methods of sending notifications: logging (via the -# log_file directive), rpc (via a message queue) and -# noop (no notifications sent, the default) - -# Notification_driver can be defined multiple times -# Do nothing driver -# notification_driver = neutron.openstack.common.notifier.no_op_notifier -# Logging driver -# notification_driver = neutron.openstack.common.notifier.log_notifier -# RPC driver. -notification_driver = neutron.openstack.common.notifier.rpc_notifier - -# default_notification_level is used to form actual topic name(s) or to set logging level -default_notification_level = INFO - -# default_publisher_id is a part of the notification payload -# host = myhost.com -# default_publisher_id = $host - -# Defined in rpc_notifier, can be comma separated values. -# The actual topic names will be %s.%(default_notification_level)s -notification_topics = notifications - -# Default maximum number of items returned in a single response, -# value == infinite and value < 0 means no max limit, and value must -# be greater than 0. If the number of items requested is greater than -# pagination_max_limit, server will just return pagination_max_limit -# of number of items. -# pagination_max_limit = -1 - -# Maximum number of DNS nameservers per subnet -# max_dns_nameservers = 5 - -# Maximum number of host routes per subnet -# max_subnet_host_routes = 20 - -# Maximum number of fixed ips per port -# max_fixed_ips_per_port = 5 - -# =========== items for agent management extension ============= -# Seconds to regard the agent as down; should be at least twice -# report_interval, to be sure the agent is down for good -agent_down_time = 75 -# =========== end of items for agent management extension ===== - -# =========== items for agent scheduler extension ============= -# Driver to use for scheduling network to DHCP agent -network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler -# Driver to use for scheduling router to a default L3 agent -router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler -# Driver to use for scheduling a loadbalancer pool to an lbaas agent -# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler - -# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted
-# networks to first DHCP agent which sends get_active_networks message to
-# neutron server
-# network_auto_schedule = True
-
-# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
-# routers to first L3 agent which sends sync_routers message to neutron server
-# router_auto_schedule = True
-
-# Number of DHCP agents scheduled to host a network. This enables redundant
-# DHCP agents for configured networks.
-# dhcp_agents_per_network = 1
-
-# =========== end of items for agent scheduler extension =====
-
-# =========== WSGI parameters related to the API server ==============
-# Number of separate worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as workers. The parent process manages them.
-api_workers = 8
-
-# Number of separate RPC worker processes to spawn. The default, 0, runs the
-# worker thread in the current process. Greater than 0 launches that number of
-# child processes as RPC workers. The parent process manages them.
-# This feature is experimental until issues are addressed and testing has been
-# enabled for various plugins for compatibility.
-rpc_workers = 8
-
-# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
-# starting API server. Not supported on OS X.
-# tcp_keepidle = 600
-
-# Number of seconds to keep retrying to listen
-# retry_until_window = 30
-
-# Number of backlog requests to configure the socket with.
-# backlog = 4096
-
-# Max header line to accommodate large tokens
-# max_header_line = 16384
-
-# Enable SSL on the API server
-# use_ssl = False
-
-# Certificate file to use when starting API server securely
-# ssl_cert_file = /path/to/certfile
-
-# Private key file to use when starting API server securely
-# ssl_key_file = /path/to/keyfile
-
-# CA certificate file to use when starting API server securely to
-# verify connecting clients. This is an optional parameter only required if
-# API clients need to authenticate to the API server using SSL certificates
-# signed by a trusted CA
-# ssl_ca_file = /path/to/cafile
-# ======== end of WSGI parameters related to the API server ==========
-
-
-# ======== neutron nova interactions ==========
-# Send notification to nova when port status is active.
-notify_nova_on_port_status_changes = True
-
-# Send notifications to nova when port data (fixed_ips/floatingips) change
-# so nova can update its cache.
-notify_nova_on_port_data_changes = True
-
-# URL for connection to nova (Only supports one nova region currently).
-nova_url = http://{{ HA_VIP }}:8774/v2
-
-# Name of nova region to use. Useful if keystone manages more than one region
-nova_region_name = RegionOne
-
-# Username for connection to nova in admin context
-nova_admin_username = nova
-
-# The uuid of the admin nova tenant
-nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
-
-# Password for connection to nova in admin context.
-nova_admin_password = {{ NOVA_PASS }}
-
-# Authorization URL for connection to nova in admin context.
-nova_admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
-
-# Number of seconds between sending events to nova if there are any events to send
-send_events_interval = 2
-
-# ======== end of neutron nova interactions ==========
-
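Note that nova_admin_tenant_id is not a static value: the expression {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }} implies an earlier Ansible task registered the output of a shell command as NOVA_ADMIN_TENANT_ID. That task is not part of this patch; a hypothetical sketch of the command such a task would register (Juno-era keystone CLI, admin credentials assumed):

    # prints the id of the 'service' tenant, whose first output line
    # becomes NOVA_ADMIN_TENANT_ID.stdout_lines[0]
    keystone tenant-list | awk '/ service / {print $2}'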
-[quotas]
-# Default driver to use for quota checks
-quota_driver = neutron.db.quota_db.DbQuotaDriver
-
-# Resource name(s) that are supported in quota features
-quota_items = network,subnet,port
-
-# Default number of resource allowed per tenant. A negative value means
-# unlimited.
-default_quota = -1
-
-# Number of networks allowed per tenant. A negative value means unlimited.
-quota_network = 100
-
-# Number of subnets allowed per tenant. A negative value means unlimited.
-quota_subnet = 100
-
-# Number of ports allowed per tenant. A negative value means unlimited.
-quota_port = 8000
-
-# Number of security groups allowed per tenant. A negative value means
-# unlimited.
-quota_security_group = 1000
-
-# Number of security group rules allowed per tenant. A negative value means
-# unlimited.
-quota_security_group_rule = 1000
-
-# Number of vips allowed per tenant. A negative value means unlimited.
-# quota_vip = 10
-
-# Number of pools allowed per tenant. A negative value means unlimited.
-# quota_pool = 10
-
-# Number of pool members allowed per tenant. A negative value means unlimited.
-# The default is unlimited because a member is not a real resource consumer
-# on OpenStack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_member = -1
-
-# Number of health monitors allowed per tenant. A negative value means
-# unlimited.
-# The default is unlimited because a health monitor is not a real resource
-# consumer on OpenStack. However, on back-end, a member is a resource consumer
-# and that is the reason why quota is possible.
-# quota_health_monitors = -1
-
-# Number of routers allowed per tenant. A negative value means unlimited.
-# quota_router = 10
-
-# Number of floating IPs allowed per tenant. A negative value means unlimited.
-# quota_floatingip = 50
-
-[agent]
-# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
-# root filter facility.
-# Change to "sudo" to skip the filtering and just run the command directly
-root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
-
-# =========== items for agent management extension =============
-# seconds between nodes reporting state to server; should be less than
-# agent_down_time, best if it is half or less than agent_down_time
-report_interval = 30
-
-# =========== end of items for agent management extension =====
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = neutron
-admin_password = {{ NEUTRON_PASS }}
-signing_dir = $state_path/keystone-signing
-
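The [keystone_authtoken] endpoints above point at the HA VIP rather than any single controller, so they can be sanity-checked from any node without restarting neutron (illustrative; both URLs should return identity version JSON):

    curl -s http://<HA_VIP>:5000/v2.0/    # public identity API behind the VIP
    curl -s http://<HA_VIP>:35357/v2.0/   # admin identity API behind the VIP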
-[database]
-# This line MUST be changed to actually run the plugin.
-# Example:
-# connection = mysql://root:pass@127.0.0.1:3306/neutron
-# Replace 127.0.0.1 above with the IP address of the database used by the
-# main neutron server. (Leave it as is if the database runs on this host.)
-# connection = sqlite:////var/lib/neutron/neutron.sqlite
-#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
-
-# The SQLAlchemy connection string used to connect to the slave database
-slave_connection =
-
-# Database reconnection retry times - in event connectivity is lost
-# set to -1 implies an infinite retry count
-max_retries = 10
-
-# Database reconnection interval in seconds - if the initial connection to the
-# database fails
-retry_interval = 10
-
-# Minimum number of SQL connections to keep open in a pool
-min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool
-max_pool_size = 100
-
-# Timeout in seconds before idle sql connections are reaped
-idle_timeout = 3600
-
-# If set, use this value for max_overflow with sqlalchemy
-max_overflow = 100
-
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-connection_debug = 0
-
-# Add python stack traces to SQL as comment strings
-connection_trace = False
-
-# If set, use this value for pool_timeout with sqlalchemy
-pool_timeout = 10
-
-[service_providers]
-# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
-# Must be in form:
-# service_provider=<service_type>:<name>:<driver>[:default]
-# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
-# Combination of <service type> and <name> must be unique; <driver> must also be unique
-# This is a multiline option, example for default provider:
-# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
-# example of non-default provider:
-# service_provider=FIREWALL:name2:firewall_driver_path
-# --- Reference implementations ---
-service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
-# In order to activate Radware's lbaas driver you need to uncomment the next line.
-# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
-# Otherwise comment the HA Proxy line
-# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
-# uncomment the following line to make the 'netscaler' LBaaS provider available.
-# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
-# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
-# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
-# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
-# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
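Once neutron-server is running with this file, the active provider lines can be verified with the Juno neutron CLI (illustrative; admin credentials assumed):

    # lists e.g. LOADBALANCER / Haproxy / default: true and VPN / openswan
    neutron service-provider-list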
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh b/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
deleted file mode 100644
index b92e202..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/neutron_init.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
-
-# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
-
diff --git a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf b/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
deleted file mode 100644
index c8991a3..0000000
--- a/compass/deploy/ansible/roles/nova-controller/templates/nova.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-[DEFAULT]
-dhcpbridge_flagfile=/etc/nova/nova.conf
-dhcpbridge=/usr/bin/nova-dhcpbridge
-logdir=/var/log/nova
-state_path=/var/lib/nova
-lock_path=/var/lock/nova
-force_dhcp_release=True
-iscsi_helper=tgtadm
-libvirt_use_virtio_for_bridges=True
-connection_type=libvirt
-root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
-verbose={{ VERBOSE }}
-debug={{ DEBUG }}
-ec2_private_dns_show_ip=True
-api_paste_config=/etc/nova/api-paste.ini
-volumes_path=/var/lib/nova/volumes
-enabled_apis=osapi_compute,metadata
-
-vif_plugging_is_fatal = false
-vif_plugging_timeout = 0
-
-auth_strategy = keystone
-
-rpc_backend = rabbit
-rabbit_host = {{ rabbit_host }}
-rabbit_userid = {{ RABBIT_USER }}
-rabbit_password = {{ RABBIT_PASS }}
-
-osapi_compute_listen={{ internal_ip }}
-metadata_listen={{ internal_ip }}
-
-my_ip = {{ internal_ip }}
-vnc_enabled = True
-vncserver_listen = {{ internal_ip }}
-vncserver_proxyclient_address = {{ internal_ip }}
-novncproxy_base_url = http://{{ HA_VIP }}:6080/vnc_auto.html
-
-novncproxy_host = {{ internal_ip }}
-novncproxy_port = 6080
-
-network_api_class = nova.network.neutronv2.api.API
-linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
-firewall_driver = nova.virt.firewall.NoopFirewallDriver
-security_group_api = neutron
-
-instance_usage_audit = True
-instance_usage_audit_period = hour
-notify_on_state_change = vm_and_task_state
-notification_driver = nova.openstack.common.notifier.rpc_notifier
-notification_driver = ceilometer.compute.nova_notifier
-
-[database]
-# The SQLAlchemy connection string used to connect to the database
-connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
-
-[keystone_authtoken]
-auth_uri = http://{{ HA_VIP }}:5000/v2.0
-identity_uri = http://{{ HA_VIP }}:35357
-admin_tenant_name = service
-admin_user = nova
-admin_password = {{ NOVA_PASS }}
-
-[glance]
-host = {{ HA_VIP }}
-
-[neutron]
-url = http://{{ HA_VIP }}:9696
-auth_strategy = keystone
-admin_tenant_name = service
-admin_username = neutron
-admin_password = {{ NEUTRON_PASS }}
-admin_auth_url = http://{{ HA_VIP }}:35357/v2.0
diff --git a/compass/deploy/ansible/roles/repo/tasks/main.yml b/compass/deploy/ansible/roles/repo/tasks/main.yml
deleted file mode 100644
index 9476f80..0000000
---
a/compass/deploy/ansible/roles/repo/tasks/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: add juno cloudarchive - apt_repository: repo="{{ juno_cloud_archive }}" state=present - -- name: first update pkgs - apt: update_cache=yes diff --git a/compass/deploy/ansible/roles/repo/templates/sources.list b/compass/deploy/ansible/roles/repo/templates/sources.list deleted file mode 100644 index 8b062e7..0000000 --- a/compass/deploy/ansible/roles/repo/templates/sources.list +++ /dev/null @@ -1 +0,0 @@ -{{ LOCAL_REPO }} diff --git a/compass/deploy/compass_vm.sh b/compass/deploy/compass_vm.sh deleted file mode 100644 index 0764917..0000000 --- a/compass/deploy/compass_vm.sh +++ /dev/null @@ -1,103 +0,0 @@ -compass_vm_dir=$WORK_DIR/vm/compass -rsa_file=$compass_vm_dir/boot.rsa -ssh_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $rsa_file" -function tear_down_compass() { - sudo virsh destroy compass > /dev/null 2>&1 - sudo virsh undefine compass > /dev/null 2>&1 - - sudo umount $compass_vm_dir/old > /dev/null 2>&1 - sudo umount $compass_vm_dir/new > /dev/null 2>&1 - - sudo rm -rf $compass_vm_dir - - log_info "tear_down_compass success!!!" -} - -function install_compass_core() { - local inventory_file=$compass_vm_dir/inventory.file - log_info "install_compass_core enter" - sed -i "s/mgmt_next_ip:.*/mgmt_next_ip: ${COMPASS_SERVER}/g" $WORK_DIR/installer/compass-install/install/group_vars/all - echo "compass_nodocker ansible_ssh_host=$MGMT_IP ansible_ssh_port=22" > $inventory_file - PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook -e pipeline=true --private-key=$rsa_file --user=root --connection=ssh --inventory-file=$inventory_file $WORK_DIR/installer/compass-install/install/compass_nodocker.yml - exit_status=$? 
- rm $inventory_file - log_info "install_compass_core exit" - if [[ $exit_status != 0 ]];then - /bin/false - fi -} - -function wait_ok() { - log_info "wait_compass_ok enter" - retry=0 - until timeout 1s ssh $ssh_args root@$MGMT_IP "exit" 2>/dev/null - do - log_progress "os install time used: $((retry*100/$1))%" - sleep 1 - let retry+=1 - if [[ $retry -ge $1 ]];then - log_error "os install time out" - tear_down_compass - exit 1 - fi - done - - log_warn "os install time used: 100%" - log_info "wait_compass_ok exit" -} - -function launch_compass() { - local old_mnt=$compass_vm_dir/old - local new_mnt=$compass_vm_dir/new - local old_iso=$WORK_DIR/iso/centos.iso - local new_iso=$compass_vm_dir/centos.iso - - log_info "launch_compass enter" - tear_down_compass - - set -e - mkdir -p $compass_vm_dir $old_mnt - sudo mount -o loop $old_iso $old_mnt - cd $old_mnt;find .|cpio -pd $new_mnt;cd - - - sudo umount $old_mnt - - chmod 755 -R $new_mnt - sed -i -e "s/REPLACE_MGMT_IP/$MGMT_IP/g" -e "s/REPLACE_MGMT_NETMASK/$MGMT_MASK/g" -e "s/REPLACE_INSTALL_IP/$COMPASS_SERVER/g" -e "s/REPLACE_INSTALL_NETMASK/$INSTALL_MASK/g" -e "s/REPLACE_GW/$MGMT_GW/g" $new_mnt/isolinux/isolinux.cfg - - sudo ssh-keygen -f $new_mnt/bootstrap/boot.rsa -t rsa -N '' - cp $new_mnt/bootstrap/boot.rsa $rsa_file - - rm -rf $new_mnt/.rr_moved $new_mnt/rr_moved - sudo mkisofs -quiet -r -J -R -b isolinux/isolinux.bin -no-emul-boot -boot-load-size 4 -boot-info-table -hide-rr-moved -x "lost+found:" -o $new_iso $new_mnt - - rm -rf $old_mnt $new_mnt - - qemu-img create -f qcow2 $compass_vm_dir/disk.img 100G - - # create vm xml - sed -e "s/REPLACE_MEM/$COMPASS_VIRT_MEM/g" \ - -e "s/REPLACE_CPU/$COMPASS_VIRT_CPUS/g" \ - -e "s#REPLACE_IMAGE#$compass_vm_dir/disk.img#g" \ - -e "s#REPLACE_ISO#$compass_vm_dir/centos.iso#g" \ - -e "s/REPLACE_NET_MGMT/mgmt/g" \ - -e "s/REPLACE_BRIDGE_INSTALL/br_install/g" \ - $COMPASS_DIR/deploy/template/vm/compass.xml \ - > $WORK_DIR/vm/compass/libvirt.xml - - sudo virsh define $compass_vm_dir/libvirt.xml - sudo virsh start compass - - if ! wait_ok 300;then - log_error "install os timeout" - exit 1 - fi - - if ! 
install_compass_core;then
- log_error "install compass core failed"
- exit 1
- fi
-
- set +e
- log_info "launch_compass exit"
-}
diff --git a/compass/deploy/conf/baremetal.conf b/compass/deploy/conf/baremetal.conf
deleted file mode 100644
index 317d561..0000000
--- a/compass/deploy/conf/baremetal.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-export VIRT_CPUS=4
-export HOST_MACS="'64:3e:8c:4c:6d:a3' '64:3e:8c:4c:6d:37' '64:3e:8c:4c:6c:d7' '64:3e:8c:4c:6b:7b' '64:3e:8c:4c:68:2b'"
-export VIRT_MEM=16384
-export VIRT_DISK=30G
-export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*'
-#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*'
-export ADAPTER_NAME="openstack_juno"
-export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$"
-export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes"
-export HOSTNAMES="host1,host2,host3,host4,host5"
-export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute"
-export DEFAULT_ROLES=""
-export SWITCH_IPS="172.29.1.166"
-export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="150"
-export POLL_SWITCHES_FLAG="nopoll_switches"
-export DASHBOARD_URL=""
-export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source ${REGTEST_DIR}/base.conf
-export VIP="10.1.0.222"
diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf
deleted file mode 100644
index d8e8d51..0000000
--- a/compass/deploy/conf/base.conf
+++ /dev/null
@@ -1,78 +0,0 @@
-export ISO_URL=http://192.168.123.11:9999/xh/work/build/work/compass.iso
-export INSTALL_IP=${INSTALL_IP:-10.1.0.12}
-export INSTALL_MASK=${INSTALL_MASK:-255.255.255.0}
-export INSTALL_GW=${INSTALL_GW:-10.1.0.1}
-export INSTALL_IP_START=${INSTALL_IP_START:-10.1.0.1}
-export INSTALL_IP_END=${INSTALL_IP_END:-10.1.0.254}
-export MGMT_IP=${MGMT_IP:-192.168.200.2}
-export MGMT_MASK=${MGMT_MASK:-255.255.252.0}
-export MGMT_GW=${MGMT_GW:-192.168.200.1}
-export MGMT_IP_START=${MGMT_IP_START:-192.168.200.3}
-export MGMT_IP_END=${MGMT_IP_END:-192.168.200.254}
-export OM_NIC=${OM_NIC:-eth3}
-export OM_IP=${OM_IP:-192.168.123.11/22}
-export OM_GW=${OM_GW:-192.168.120.1}
-export COMPASS_VIRT_CPUS=4
-export COMPASS_VIRT_MEM=4096
-export COMPASS_SERVER=$INSTALL_IP
-export COMPASS_SERVER_URL="http://$COMPASS_SERVER/api"
-export COMPASS_USER_EMAIL="admin@huawei.com"
-export COMPASS_USER_PASSWORD="admin"
-export CLUSTER_NAME="opnfv2"
-export LANGUAGE="EN"
-export TIMEZONE="America/Los_Angeles"
-export NTP_SERVER="$COMPASS_SERVER"
-export NAMESERVERS="$COMPASS_SERVER"
-export DOMAIN="ods.com"
-export PARTITIONS="/home=5%,/tmp=5%,/var=20%"
-export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24"
-export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'}
-export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'}
-export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'}
-export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'}
-export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0}
-export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1}
-export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3}
-export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2}
-
-
-function next_ip {
- ip_addr=$1
- ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')"
- ip_last="$(echo $ip_addr | cut -d.
-f4)" - let ip_last_next=$ip_last+1 - echo "${ip_base}.${ip_last_next}" -} - -if [ -z "$HOST_NETWORKS" ]; then - IFS=, read -a HOSTNAME_LIST <<< "$HOSTNAMES" - MANAGE_IP=${MANAGEMENT_IP_START} - TENANT_IP=${TENANT_IP_START} - PUBLIC_IP=${PUBLIC_IP_START} - STORAGE_IP=${STORAGE_IP_START} - for HOSTNAME in ${HOSTNAME_LIST[@]}; do - if [ -z "$HOST_NETWORKS" ]; then - HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}" - else - HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}" - fi - MANAGE_IP=$(next_ip ${MANAGE_IP}) - TENANT_IP=$(next_ip ${TENANT_IP}) - PUBLIC_IP=$(next_ip ${PUBLIC_IP}) - STORAGE_IP=$(next_ip ${STORAGE_IP}) - done - export HOST_NETWORKS -fi - -export NETWORK_MAPPING=${NETWORK_MAPPING:-"management=${MANAGEMENT_INTERFACE},tenant=${TENANT_INTERFACE},storage=${STORAGE_INTERFACE},external=${PUBLIC_INTERFACE}"} - -export PROXY="" -export IGNORE_PROXY="" -export SEARCH_PATH="ods.com" -export GATEWAY="10.1.0.1" -export SERVER_CREDENTIAL="root=root" -export LOCAL_REPO_URL="" -export OS_CONFIG_FILENAME="" -export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service" -export CONSOLE_CREDENTIALS="admin:console=console,compute:console=console,dashboard:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console" -export PACKAGE_CONFIG_FILENAME="" diff --git a/compass/deploy/conf/cluster.conf b/compass/deploy/conf/cluster.conf deleted file mode 100644 index 4f43027..0000000 --- a/compass/deploy/conf/cluster.conf +++ /dev/null @@ -1,20 +0,0 @@ -export VIRT_NUMBER=5 -export VIRT_CPUS=4 -export VIRT_MEM=16384 -export VIRT_DISK=30G -export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' -#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' -export ADAPTER_NAME="openstack_juno" -export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$" -export ADAPTER_FLAVOR_PATTERN="HA-ansible-multinodes" -export HOSTNAMES="host1,host2,host3,host4,host5" -export HOST_ROLES="host1=controller,ha;host2=controller,ha;host3=controller,ha;host4=compute;host5=compute" -export DEFAULT_ROLES="" -export SWITCH_IPS="1.1.1.1" -export SWITCH_CREDENTIAL="version=2c,community=public" -export DEPLOYMENT_TIMEOUT="150" -export POLL_SWITCHES_FLAG="nopoll_switches" -export DASHBOARD_URL="" -export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -source ${REGTEST_DIR}/base.conf -export VIP="10.1.0.222" diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf deleted file mode 100644 index f32411b..0000000 --- a/compass/deploy/conf/five.conf +++ /dev/null @@ -1,19 +0,0 @@ -export VIRT_NUMBER=5 -export VIRT_CPUS=4 -export VIRT_MEM=16384 -export VIRT_DISK=30G -export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' -#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' -export ADAPTER_NAME="openstack_juno" -export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$" -export ADAPTER_FLAVOR_PATTERN="single-controller" -export HOSTNAMES="host1,host2,host3,host4,host5" -export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,storage;host4=compute,storage;host5=compute,storage" -export DEFAULT_ROLES="" 
-export SWITCH_IPS="1.1.1.1"
-export SWITCH_CREDENTIAL="version=2c,community=public"
-export DEPLOYMENT_TIMEOUT="150"
-export POLL_SWITCHES_FLAG="nopoll_switches"
-export DASHBOARD_URL=""
-export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-source ${REGTEST_DIR}/base.conf
diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh
deleted file mode 100644
index 41ef209..0000000
--- a/compass/deploy/deploy-vm.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-cd ..
-rm -rf compass-core
-git clone http://git.openstack.org/stackforge/compass-core -b dev/experimental
-cd compass-core
-virtualenv venv
-source venv/bin/activate
-pip install -i http://pypi.douban.com/simple -e .
-if [[ ! -d /var/log/compass ]]; then
- sudo mkdir /var/log/compass
- sudo chmod -R 777 /var/log/compass
-fi
-if [[ ! -d /etc/compass ]]; then
- sudo mkdir /etc/compass
- sudo cp -rf conf/setting /etc/compass/.
-fi
-cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
-sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
-#source ../compass-install/ci/allinone.conf
-/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
- "ssh root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks" vagrant
-
-/usr/bin/expect ${SCRIPT_DIR}/../deploy/remote_excute.exp \
- "scp -r ${SCRIPT_DIR}/../deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py" \
- vagrant
-bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
---compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
---cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
---hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
---adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \
---adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \
---adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \
---http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \
---ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \
---search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \
---server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \
---os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \
---console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
---network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
---host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
---machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
---deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
---cluster_vip="${VIP}"
-deploy_result=$?
-tear_down_machines
-cd ../compass-install
-sudo vagrant destroy compass_nodocker
-if [[ $deploy_result != 0 ]]; then
- echo "deployment failed"
- exit 1
-else
- echo "deployment complete"
-fi
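Both this script and deploy_host.sh below pass --machines=${machines//\'} to client.py; that bash expansion strips every single quote from the MAC list that mac_generator.sh produces. A standalone illustration of the expansion:

    machines="'00:00:a1:b2:c3:d4' '00:00:e5:f6:a7:b8'"   # sample quoted list
    echo "--machines=${machines//\'}"
    # prints: --machines=00:00:a1:b2:c3:d4 00:00:e5:f6:a7:b8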
diff --git a/compass/deploy/deploy_host.sh b/compass/deploy/deploy_host.sh
deleted file mode 100644
index d08a821..0000000
--- a/compass/deploy/deploy_host.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-function deploy_host(){
- cd $WORK_DIR/installer/compass-core
- source $WORK_DIR/venv/bin/activate
- if pip --help | grep -q trusted; then
- pip install -i http://pypi.douban.com/simple -e . --trusted-host pypi.douban.com
- else
- pip install -i http://pypi.douban.com/simple -e .
- fi
-
- sudo mkdir -p /var/log/compass
- sudo chmod -R 777 /var/log/compass
-
- sudo mkdir -p /etc/compass
- sudo cp -rf conf/setting /etc/compass/.
-
- cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py
- sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py
- ssh $ssh_args root@${COMPASS_SERVER} mkdir -p /opt/compass/bin/ansible_callbacks
- scp $ssh_args -r ${COMPASS_DIR}/deploy/status_callback.py root@${COMPASS_SERVER}:/opt/compass/bin/ansible_callbacks/status_callback.py
-
- (sleep 15;reboot_hosts ) &
- bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \
- --compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \
- --cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \
- --hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \
- --adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \
- --adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \
- --adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \
- --http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \
- --ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \
- --search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \
- --server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \
- --os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \
- --console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \
- --network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \
- --host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \
- --machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \
- --deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" \
- --cluster_vip="${VIP}"
-}
diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh
deleted file mode 100755
index 49ea947..0000000
--- a/compass/deploy/func.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-function tear_down_machines() {
- virtmachines=$(sudo virsh list --name |grep pxe)
- for virtmachine in $virtmachines; do
- echo "destroy $virtmachine"
- sudo virsh destroy $virtmachine
- if [[ "$?" != "0" ]]; then
- echo "destroy instance $virtmachine failed"
- exit 1
- fi
- done
-
- sudo virsh list --all|grep shut|awk '{print $2}'|xargs -n 1 sudo virsh undefine
-
- vol_names=$(sudo virsh vol-list default |grep .img | awk '{print $1}')
- for vol_name in $vol_names; do
- echo "virsh vol-delete $vol_name"
- sudo virsh vol-delete $vol_name --pool default
- if [[ "$?" != "0" ]]; then
- echo "vol-delete $vol_name failed!"
- exit 1
- fi
- done
-}
diff --git a/compass/deploy/host_baremetal.sh b/compass/deploy/host_baremetal.sh
deleted file mode 100644
index 26238e0..0000000
--- a/compass/deploy/host_baremetal.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-function get_host_macs() {
- local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
- local machines=`echo $HOST_MACS|sed 's/ /,/g'`
-
- echo "test: true" >> $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
-
- echo $machines
-}
diff --git a/compass/deploy/host_vm.sh b/compass/deploy/host_vm.sh
deleted file mode 100644
index cf9a757..0000000
--- a/compass/deploy/host_vm.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-host_vm_dir=$WORK_DIR/vm
-function tear_down_machines() {
- for i in host{0..4}
- do
- sudo virsh destroy $i 1>/dev/null 2>/dev/null
- sudo virsh undefine $i 1>/dev/null 2>/dev/null
- rm -rf $host_vm_dir/$i
- done
-}
-
-function reboot_hosts() {
- log_warn "reboot_hosts do nothing"
-}
-
-function launch_host_vms() {
- tear_down_machines
- #function_bod
- mac_array=`echo $machines|sed 's/,/ /g'`
- log_info "bringing up pxe boot vms"
- i=0
- for mac in $mac_array; do
- log_info "creating vm disk for instance host${i}"
- vm_dir=$host_vm_dir/host$i
- mkdir -p $vm_dir
- sudo qemu-img create -f raw $vm_dir/disk.img ${VIRT_DISK}
- # create vm xml
- sed -e "s/REPLACE_MEM/$VIRT_MEM/g" \
- -e "s/REPLACE_CPU/$VIRT_CPUS/g" \
- -e "s/REPLACE_NAME/host$i/g" \
- -e "s#REPLACE_IMAGE#$vm_dir/disk.img#g" \
- -e "s/REPLACE_BOOT_MAC/$mac/g" \
- -e "s/REPLACE_BRIDGE_MGMT/br_install/g" \
- -e "s/REPLACE_BRIDGE_TENANT/br_install/g" \
- -e "s/REPLACE_BRIDGE_PUBLIC/br_install/g" \
- -e "s/REPLACE_BRIDGE_STORAGE/br_install/g" \
- $COMPASS_DIR/deploy/template/vm/host.xml\
- > $vm_dir/libvirt.xml
-
- sudo virsh define $vm_dir/libvirt.xml
- sudo virsh start host$i
- let i=i+1
- done
-}
-
-function get_host_macs() {
- local config_file=$WORK_DIR/installer/compass-install/install/group_vars/all
- local mac_generator=${COMPASS_DIR}/deploy/mac_generator.sh
- local machines=
-
- chmod +x $mac_generator
- mac_array=`$mac_generator $VIRT_NUMBER`
- machines=`echo $mac_array|sed 's/ /,/g'`
-
- echo "test: true" >> $config_file
- echo "pxe_boot_macs: [${machines}]" >> $config_file
-
- echo $machines
-}
-
diff --git a/compass/deploy/mac_generator.sh b/compass/deploy/mac_generator.sh
deleted file mode 100755
index ca898cb..0000000
--- a/compass/deploy/mac_generator.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-function mac_address_part() {
- hex_number=$(printf '%02x' $RANDOM)
- number_length=${#hex_number}
- number_start=$(expr $number_length - 2)
- echo ${hex_number:$number_start:2}
-}
-
-function mac_address() {
- echo "'00:00:$(mac_address_part):$(mac_address_part):$(mac_address_part):$(mac_address_part)'"
-}
-
-machines=''
-for i in `seq $1`; do
- mac=$(mac_address)
-
- if [[ -z $machines ]]; then
- machines="${mac}"
- else
- machines="${machines} ${mac}"
- fi
-done
-echo ${machines}
diff --git a/compass/deploy/network.sh b/compass/deploy/network.sh
deleted file mode 100755
index c60607e..0000000
--- a/compass/deploy/network.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-function destroy_nets() {
- sudo virsh net-destroy mgmt > /dev/null 2>&1
- sudo virsh net-undefine mgmt > /dev/null 2>&1
-
- sudo virsh net-destroy install > /dev/null 2>&1
- sudo virsh net-undefine install > /dev/null 2>&1
- rm -rf $COMPASS_DIR/deploy/work/network/*.xml
-}
-
-function setup_om_bridge() {
- local device=$1
- local gw=$2
- ip link set br_install down
- ip addr flush $device
- brctl delbr
br_install - - brctl addbr br_install - brctl addif br_install $device - ip link set br_install up - - shift;shift - for ip in $*;do - ip addr add $ip dev br_install - done - - route add default gw $gw -} - -function setup_om_nat() { - # create install network - sed -e "s/REPLACE_BRIDGE/br_install/g" \ - -e "s/REPLACE_NAME/install/g" \ - -e "s/REPLACE_GATEWAY/$INSTALL_GW/g" \ - -e "s/REPLACE_MASK/$INSTALL_MASK/g" \ - -e "s/REPLACE_START/$INSTALL_IP_START/g" \ - -e "s/REPLACE_END/$INSTALL_IP_END/g" \ - $COMPASS_DIR/deploy/template/network/nat.xml \ - > $WORK_DIR/network/install.xml - - sudo virsh net-define $WORK_DIR/network/install.xml - sudo virsh net-start install -} - -function create_nets() { - destroy_nets - - # create mgmt network - sed -e "s/REPLACE_BRIDGE/br_mgmt/g" \ - -e "s/REPLACE_NAME/mgmt/g" \ - -e "s/REPLACE_GATEWAY/$MGMT_GW/g" \ - -e "s/REPLACE_MASK/$MGMT_MASK/g" \ - -e "s/REPLACE_START/$MGMT_IP_START/g" \ - -e "s/REPLACE_END/$MGMT_IP_END/g" \ - $COMPASS_DIR/deploy/template/network/nat.xml \ - > $WORK_DIR/network/mgmt.xml - - sudo virsh net-define $WORK_DIR/network/mgmt.xml - sudo virsh net-start mgmt - - # create install network - if [[ ! -z $VIRT_NUMBER ]];then - setup_om_nat - else - mask=`echo $INSTALL_MASK | awk -F'.' '{print ($1*(2^24)+$2*(2^16)+$3*(2^8)+$4)}'` - mask_len=`echo "obase=2;${mask}"|bc|awk -F'0' '{print length($1)}'` - setup_om_bridge $OM_NIC $OM_GW $INSTALL_GW/$mask_len $OM_IP - fi - -} - diff --git a/compass/deploy/prepare.sh b/compass/deploy/prepare.sh deleted file mode 100644 index 9ec15f8..0000000 --- a/compass/deploy/prepare.sh +++ /dev/null @@ -1,35 +0,0 @@ -function prepare_env() { - export PYTHONPATH=/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages - sudo apt-get update -y - sudo apt-get install mkisofs bc - sudo apt-get install git python-pip python-dev -y - sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y - sudo pip install --upgrade pip - sudo pip install --upgrade ansible - sudo pip install --upgrade virtualenv - sudo service libvirt-bin restart - - # prepare work dir - sudo rm -rf $WORK_DIR - mkdir -p $WORK_DIR - mkdir -p $WORK_DIR/installer - mkdir -p $WORK_DIR/vm - mkdir -p $WORK_DIR/network - mkdir -p $WORK_DIR/iso - mkdir -p $WORK_DIR/venv - - if [[ ! 
-f centos.iso ]];then - wget -O $WORK_DIR/iso/centos.iso $ISO_URL - fi - - # copy compass - mkdir -p $WORK_DIR/mnt - sudo mount -o loop $WORK_DIR/iso/centos.iso $WORK_DIR/mnt - cp -rf $WORK_DIR/mnt/compass/compass-core $WORK_DIR/installer/ - cp -rf $WORK_DIR/mnt/compass/compass-install $WORK_DIR/installer/ - sudo umount $WORK_DIR/mnt - rm -rf $WORK_DIR/mnt - - chmod 755 $WORK_DIR -R - virtualenv $WORK_DIR/venv -} diff --git a/compass/deploy/remote_excute.exp b/compass/deploy/remote_excute.exp deleted file mode 100644 index 9dd112b..0000000 --- a/compass/deploy/remote_excute.exp +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/expect - -set command [lindex $argv 0] -set passwd [lindex $argv 1] - -eval spawn "$command" -set timeout 60 - -expect { - -re ".*es.*o.*" - { - exp_send "yes\r" - exp_continue - } - - -re ".*sword:" { - exp_send "$passwd\r" - - } - -} - -interact diff --git a/compass/deploy/setup-env.sh b/compass/deploy/setup-env.sh deleted file mode 100644 index ffa9aa5..0000000 --- a/compass/deploy/setup-env.sh +++ /dev/null @@ -1,61 +0,0 @@ -rm -rf compass-install -git clone http://git.openstack.org/stackforge/compass-install -cd compass-install - -function join { local IFS="$1"; shift; echo "$*"; } -source ${SCRIPT_DIR}/../deploy/conf/${CONF_NAME}.conf -source ${SCRIPT_DIR}/../deploy/func.sh -if [[ ! -z $VIRT_NUMBER ]]; then - mac_array=$(${SCRIPT_DIR}/../deploy/mac_generator.sh $VIRT_NUMBER) - mac_list=$(join , $mac_array) - echo "pxe_boot_macs: [${mac_list}]" >> install/group_vars/all - echo "test: true" >> install/group_vars/all -fi -virsh list |grep compass -if [[ $? == 0 ]]; then - compass_old=`virsh list |grep compass|awk '{print$2}'` - virsh destroy ${compass_old} - virsh undefine ${compass_old} -fi -sudo vagrant up compass_nodocker -if [[ $? != 0 ]]; then - echo "installation of compass failed" - sudo vagrant destroy compass_nodocker - exit 1 -fi -echo "compass is up" - -tear_down_machines -if [[ -n $mac_array ]]; then - echo "bringing up pxe boot vms" - i=0 - for mac in $mac_array; do - echo "creating vm disk for instance pxe${i}" - sudo qemu-img create -f raw /home/pxe${i}.raw ${VIRT_DISK} - sudo virt-install --accelerate --hvm --connect qemu:///system \ - --name pxe$i --ram=$VIRT_MEM --pxe --disk /home/pxe$i.raw,format=raw \ - --vcpus=$VIRT_CPUS --graphics vnc,listen=0.0.0.0 \ - --network=bridge:virbr2,mac=$mac \ - --network=bridge:virbr2 \ - --network=bridge:virbr2 \ - --network=bridge:virbr2 \ - --noautoconsole --autostart --os-type=linux --os-variant=rhel6 - if [[ $? 
!= 0 ]]; then - echo "launching pxe${i} failed" - exit 1 - fi - echo "checking pxe${i} state" - state=$(virsh domstate pxe${i}) - if [[ "$state" == "running" ]]; then - echo "pxe${i} is running" - sudo virsh destroy pxe${i} - fi - echo "add network boot option and make pxe${i} reboot if failing" - sudo sed -i "// a\ " /etc/libvirt/qemu/pxe${i}.xml - sudo sed -i "// a\ " /etc/libvirt/qemu/pxe${i}.xml - sudo virsh define /etc/libvirt/qemu/pxe${i}.xml - sudo virsh start pxe${i} - let i=i+1 - done -fi -machines=${mac_list} diff --git a/compass/deploy/status_callback.py b/compass/deploy/status_callback.py deleted file mode 100644 index 8619132..0000000 --- a/compass/deploy/status_callback.py +++ /dev/null @@ -1,174 +0,0 @@ -# (C) 2012, Michael DeHaan, - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import httplib -import json -import sys -import logging - -def task_error(host, data): - logging.info("task_error: host=%s,data=%s" % (host, data)) - - if type(data) == dict: - invocation = data.pop('invocation', {}) - - notify_host("localhost", host, "failed") - -class CallbackModule(object): - """ - logs playbook results, per host, in /var/log/ansible/hosts - """ - - def on_any(self, *args, **kwargs): - pass - - def runner_on_failed(self, host, res, ignore_errors=False): - task_error(host, res) - - def runner_on_ok(self, host, res): - pass - - def runner_on_skipped(self, host, item=None): - pass - - def runner_on_unreachable(self, host, res): - pass - - def runner_on_no_hosts(self): - pass - - def runner_on_async_poll(self, host, res, jid, clock): - pass - - def runner_on_async_ok(self, host, res, jid): - pass - - def runner_on_async_failed(self, host, res, jid): - task_error(host, res) - - def playbook_on_start(self): - pass - - def playbook_on_notify(self, host, handler): - pass - - def playbook_on_no_hosts_matched(self): - pass - - def playbook_on_no_hosts_remaining(self): - pass - - def playbook_on_task_start(self, name, is_conditional): - pass - - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass - - def playbook_on_setup(self): - pass - - def playbook_on_import_for_host(self, host, imported_file): - pass - - def playbook_on_not_import_for_host(self, host, missing_file): - pass - - def playbook_on_play_start(self, name): - pass - - def playbook_on_stats(self, stats): - logging.info("playbook_on_stats enter") - hosts = sorted(stats.processed.keys()) - host_vars = self.playbook.inventory.get_variables(hosts[0]) - cluster_name = host_vars['cluster_name'] - failures = False - unreachable = False - - for host in hosts: - summary = stats.summarize(host) - - if summary['failures'] > 0: - failures = True - if summary['unreachable'] > 0: - unreachable = True - - if failures or unreachable: - for host in hosts: - notify_host("localhost", host, "error") - return - - for host in hosts: - clusterhost_name = host + "." 
+ cluster_name
- notify_host("localhost", clusterhost_name, "succ")
-
-
-def raise_for_status(resp):
- if resp.status < 200 or resp.status >= 300:
- raise RuntimeError("%s, %s, %s" % (resp.status, resp.reason, resp.read()))
-
-def auth(conn):
- credential = {}
- credential['email'] = "admin@huawei.com"
- credential['password'] = "admin"
- url = "/api/users/token"
- headers = {"Content-type": "application/json",
- "Accept": "*/*"}
- conn.request("POST", url, json.dumps(credential), headers)
- resp = conn.getresponse()
-
- raise_for_status(resp)
- return json.loads(resp.read())["token"]
-
-def notify_host(compass_host, host, status):
- if status == "succ":
- body = {"ready": True}
- url = "/api/clusterhosts/%s/state_internal" % host
- elif status == "error":
- body = {"state": "ERROR"}
- host = host.strip("host")
- url = "/api/clusterhosts/%s/state" % host
- else:
- logging.error("notify_host: host %s with status %s is not supported" \
- % (host, status))
- return
-
- headers = {"Content-type": "application/json",
- "Accept": "*/*"}
-
- conn = httplib.HTTPConnection(compass_host, 80)
- token = auth(conn)
- headers["X-Auth-Token"] = token
- logging.info("host=%s,url=%s,body=%s,headers=%s" % (compass_host,url,json.dumps(body),headers))
- conn.request("POST", url, json.dumps(body), headers)
- resp = conn.getresponse()
- try:
- raise_for_status(resp)
- logging.info("notify host status success!!! status=%s, body=%s" % (resp.status, resp.read()))
- except Exception as e:
- logging.error("http request failed %s" % str(e))
- raise
- finally:
- conn.close()
-
-if __name__ == "__main__":
- if len(sys.argv) != 3:
- logging.error("params: host and status are needed")
- sys.exit(1)
-
- host = sys.argv[1]
- status = sys.argv[2]
- notify_host(host, status)
diff --git a/compass/deploy/template/network/bridge.xml b/compass/deploy/template/network/bridge.xml
deleted file mode 100644
index 6202cf1..0000000
--- a/compass/deploy/template/network/bridge.xml
+++ /dev/null
@@ -1,5 +0,0 @@
[The 5-line body of bridge.xml was lost when its markup was stripped during extraction; the surviving fragment shows it defined a libvirt network named REPLACE_NAME attached to an existing bridge.]
diff --git a/compass/deploy/template/network/nat.xml b/compass/deploy/template/network/nat.xml
deleted file mode 100644
index 90ce888..0000000
--- a/compass/deploy/template/network/nat.xml
+++ /dev/null
@@ -1,10 +0,0 @@
[The 10-line body of nat.xml was likewise stripped; per the sed substitutions in network.sh it defined a NAT-mode libvirt network using the placeholders REPLACE_NAME, REPLACE_BRIDGE, REPLACE_GATEWAY, REPLACE_MASK, REPLACE_START and REPLACE_END.]
diff --git a/compass/deploy/template/vm/compass.xml b/compass/deploy/template/vm/compass.xml
deleted file mode 100644
index 918a9f2..0000000
--- a/compass/deploy/template/vm/compass.xml
+++ /dev/null
@@ -1,64 +0,0 @@
[The 64-line body of compass.xml was stripped; the surviving fragments show a libvirt domain named compass with REPLACE_MEM as memory/currentMemory, REPLACE_CPU vcpus, hvm boot, on_poweroff/on_reboot/on_crash set to destroy/restart/destroy, and /usr/bin/kvm-spice as the emulator; compass_vm.sh additionally substitutes REPLACE_IMAGE, REPLACE_ISO, REPLACE_NET_MGMT and REPLACE_BRIDGE_INSTALL into it.]
diff --git a/compass/deploy/template/vm/host.xml b/compass/deploy/template/vm/host.xml
deleted file mode 100644
index b399e6f..0000000
--- a/compass/deploy/template/vm/host.xml
+++ /dev/null
@@ -1,67 +0,0 @@
[The 67-line body of host.xml was stripped; the surviving fragments show a libvirt domain named REPLACE_NAME with REPLACE_MEM as memory/currentMemory, REPLACE_CPU vcpus, hvm boot, on_poweroff/on_reboot/on_crash set to destroy/restart/restart, and /usr/bin/kvm-spice as the emulator; host_vm.sh additionally substitutes REPLACE_IMAGE, REPLACE_BOOT_MAC and the REPLACE_BRIDGE_MGMT/TENANT/PUBLIC/STORAGE placeholders.]
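Since the four template bodies above could not be recovered, here is a purely illustrative sketch, in the same shell style as host_vm.sh, of the general shape such a libvirt domain template takes; every element below is an assumption reconstructed from the surviving fragments and the sed lines in launch_host_vms(), not the original file:

    # hypothetical stand-in for template/vm/host.xml; placeholders as in the original
    cat > libvirt.xml <<'EOF'
    <domain type='kvm'>
      <name>REPLACE_NAME</name>
      <memory>REPLACE_MEM</memory>
      <currentMemory>REPLACE_MEM</currentMemory>
      <vcpu>REPLACE_CPU</vcpu>
      <os><type>hvm</type><boot dev='network'/></os>
      <on_poweroff>destroy</on_poweroff>
      <on_reboot>restart</on_reboot>
      <on_crash>restart</on_crash>
      <devices>
        <emulator>/usr/bin/kvm-spice</emulator>
        <disk type='file' device='disk'>
          <source file='REPLACE_IMAGE'/>
          <target dev='vda' bus='virtio'/>
        </disk>
        <interface type='bridge'>
          <mac address='REPLACE_BOOT_MAC'/>
          <source bridge='REPLACE_BRIDGE_MGMT'/>
        </interface>
      </devices>
    </domain>
    EOF
    sudo virsh define libvirt.xml    # as done in launch_host_vms()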