Diffstat (limited to 'openstack-ansible')
-rw-r--r--  openstack-ansible/README.md                                 |  48
-rw-r--r--  openstack-ansible/file/cinder.yml                           |  13
-rw-r--r--  openstack-ansible/file/exports                              |  12
-rw-r--r--  openstack-ansible/file/modules                              |   8
-rw-r--r--  openstack-ansible/file/openstack_user_config.yml            | 278
-rw-r--r--  openstack-ansible/file/opnfv-setup-openstack.yml            |  34
-rw-r--r--  openstack-ansible/file/user_variables.yml                   |  27
-rw-r--r--  openstack-ansible/playbooks/configure-targethosts.yml       |  61
-rw-r--r--  openstack-ansible/playbooks/configure-xcimaster.yml         |  66
-rw-r--r--  openstack-ansible/playbooks/inventory                       |  11
-rwxr-xr-x  openstack-ansible/scripts/osa-deploy.sh                     | 136
-rw-r--r--  openstack-ansible/template/bifrost/compute.interface.j2     |  86
-rw-r--r--  openstack-ansible/template/bifrost/controller.interface.j2  |  71
-rw-r--r--  openstack-ansible/var/ubuntu.yml                            |   8
14 files changed, 0 insertions, 859 deletions
diff --git a/openstack-ansible/README.md b/openstack-ansible/README.md
deleted file mode 100644
index 6210cc0e..00000000
--- a/openstack-ansible/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-===============================
-How to deploy OpenStack-Ansible
-===============================
-The script and playbooks defined in this repo deploy an OpenStack
-cloud based on OpenStack-Ansible.
-They need to be combined with Bifrost: use Bifrost to provision six VMs first.
-To learn how to use Bifrost, read the document at
-[/opt/bifrost/README.md].
-
-Minimal requirements:
-1. You will need at least 150 GB of free space on the partition where
-   "/var/lib/libvirt/images/" lives.
-2. Each VM needs at least 8 vCPUs, 12 GB RAM and 60 GB of disk.
-
-After provisioning the six VMs, follow these steps:
-
-1. Run the script to deploy OpenStack::
-   cd /opt/openstack-ansible/scripts/
-   sudo ./osa-deploy.sh
-This will take a long time. When the deployment succeeds, you will see the
-message "OpenStack deployed successfully".
-
-2. To verify the OpenStack deployment:
- 2.1 ssh into the controller::
- ssh 192.168.122.3
- 2.2 Enter into the lxc container::
- lxcname=$(lxc-ls | grep utility)
- lxc-attach -n $lxcname
- 2.3 Verify the OpenStack API::
- source /root/openrc
- openstack user list
-
-This will show the following output::
-+----------------------------------+--------------------+
-| ID | Name |
-+----------------------------------+--------------------+
-| 056f8fe41336435991fd80872731cada | aodh |
-| 308f6436e68f40b49d3b8e7ce5c5be1e | glance |
-| 351b71b43a66412d83f9b3cd75485875 | nova |
-| 511129e053394aea825cce13b9f28504 | ceilometer |
-| 5596f71319d44c8991fdc65f3927b62e | gnocchi |
-| 586f49e3398a4c47a2f6fe50135d4941 | stack_domain_admin |
-| 601b329e6b1d427f9a1e05ed28753497 | heat |
-| 67fe383b94964a4781345fbcc30ae434 | cinder |
-| 729bb08351264d729506dad84ed3ccf0 | admin |
-| 9f2beb2b270940048fe6844f0b16281e | neutron |
-| fa68f86dd1de4ddbbb7415b4d9a54121 | keystone |
-+----------------------------------+--------------------+
diff --git a/openstack-ansible/file/cinder.yml b/openstack-ansible/file/cinder.yml
deleted file mode 100644
index e40b3925..00000000
--- a/openstack-ansible/file/cinder.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# This file contains an example to show how to set
-# the cinder-volume service to run in a container.
-#
-# Important note:
-# When using LVM or any iSCSI-based cinder backends, such as NetApp with
-# iSCSI protocol, the cinder-volume service *must* run on metal.
-# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
-
-container_skel:
- cinder_volumes_container:
- properties:
- is_metal: false
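
Note: per the comment above, this example keeps cinder-volume in a container
(is_metal: false), while LVM and other iSCSI-based backends require it to run
on metal. A minimal sketch of that on-metal variant (same container_skel
structure with only the override flipped; an assumed variant, not part of the
deleted file)::

    container_skel:
      cinder_volumes_container:
        properties:
          is_metal: true
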
diff --git a/openstack-ansible/file/exports b/openstack-ansible/file/exports
deleted file mode 100644
index 315f79d2..00000000
--- a/openstack-ansible/file/exports
+++ /dev/null
@@ -1,12 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-# to NFS clients. See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
-#
-/images *(rw,sync,no_subtree_check,no_root_squash)
-
diff --git a/openstack-ansible/file/modules b/openstack-ansible/file/modules
deleted file mode 100644
index 60a517f1..00000000
--- a/openstack-ansible/file/modules
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/openstack-ansible/file/openstack_user_config.yml b/openstack-ansible/file/openstack_user_config.yml
deleted file mode 100644
index 43e88c0d..00000000
--- a/openstack-ansible/file/openstack_user_config.yml
+++ /dev/null
@@ -1,278 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.15"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.15"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.15"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# ceilometer (telemetry API)
-metering-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# aodh (telemetry alarm service)
-metering-alarm_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# gnocchi (telemetry metrics storage)
-metrics_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# ceilometer compute agent (telemetry)
-metering-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-# cinder volume hosts (LVM-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.11"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.12"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- lvm:
- volume_group: cinder-volumes
- volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
- volume_backend_name: LVM_iSCSI
- iscsi_ip_address: "172.29.244.13"
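
Note: as the comments for image_hosts and storage_hosts say, the per-host
settings repeated above could instead be applied once as global overrides.
A minimal sketch, assuming the same NFS values were moved into
user_variables.yml instead of being set per controller::

    glance_nfs_client:
      - server: "172.29.244.15"
        remote_path: "/images"
        local_path: "/var/lib/glance/images"
        type: "nfs"
        options: "_netdev,auto"
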
diff --git a/openstack-ansible/file/opnfv-setup-openstack.yml b/openstack-ansible/file/opnfv-setup-openstack.yml
deleted file mode 100644
index aacdeffb..00000000
--- a/openstack-ansible/file/opnfv-setup-openstack.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-- include: os-keystone-install.yml
-- include: os-glance-install.yml
-- include: os-cinder-install.yml
-- include: os-nova-install.yml
-- include: os-neutron-install.yml
-- include: os-heat-install.yml
-- include: os-horizon-install.yml
-- include: os-ceilometer-install.yml
-- include: os-aodh-install.yml
-#NOTE(stevelle) Ensure Gnocchi identities exist before Swift
-- include: os-gnocchi-install.yml
- when:
- - gnocchi_storage_driver is defined
- - gnocchi_storage_driver == 'swift'
- vars:
- gnocchi_identity_only: True
-- include: os-swift-install.yml
-- include: os-gnocchi-install.yml
-- include: os-ironic-install.yml
diff --git a/openstack-ansible/file/user_variables.yml b/openstack-ansible/file/user_variables.yml
deleted file mode 100644
index 65cbcc11..00000000
--- a/openstack-ansible/file/user_variables.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
diff --git a/openstack-ansible/playbooks/configure-targethosts.yml b/openstack-ansible/playbooks/configure-targethosts.yml
deleted file mode 100644
index 538fe17e..00000000
--- a/openstack-ansible/playbooks/configure-targethosts.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-- hosts: all
- remote_user: root
- vars_files:
- - ../var/ubuntu.yml
- tasks:
- - name: add public key to host
- copy:
- src: ../file/authorized_keys
- dest: /root/.ssh/authorized_keys
- - name: configure modules
- copy:
- src: ../file/modules
- dest: /etc/modules
-
-- hosts: controller
- remote_user: root
- vars_files:
- - ../var/ubuntu.yml
- tasks:
- - name: configure network
- template:
- src: ../template/bifrost/controller.interface.j2
- dest: /etc/network/interfaces
- notify:
- - restart network service
- handlers:
- - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 && /sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: compute
- remote_user: root
- vars_files:
- - ../var/ubuntu.yml
- tasks:
- - name: configure network
- template:
- src: ../template/bifrost/compute.interface.j2
- dest: /etc/network/interfaces
- notify:
- - restart network service
- handlers:
- - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 && /sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: compute01
- remote_user: root
- tasks:
- - name: make nfs dir
- file: "dest=/images mode=0777 state=directory"
-    - name: configure nfs service ports
- shell: "echo 'nfs 2049/tcp' >> /etc/services && echo 'nfs 2049/udp' >> /etc/services"
- - name: configure NFS
- copy:
- src: ../file/exports
- dest: /etc/exports
- notify:
- - restart nfs service
- handlers:
- - name: restart nfs service
- service: name=nfs-kernel-server state=restarted
diff --git a/openstack-ansible/playbooks/configure-xcimaster.yml b/openstack-ansible/playbooks/configure-xcimaster.yml
deleted file mode 100644
index fbbde640..00000000
--- a/openstack-ansible/playbooks/configure-xcimaster.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: xcimaster
- remote_user: root
- vars_files:
- - ../var/ubuntu.yml
- tasks:
- - name: generate SSH keys
- shell: ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""
- args:
- creates: /root/.ssh/id_rsa
- - name: fetch public key
- fetch: src="/root/.ssh/id_rsa.pub" dest="/"
- - name: remove openstack-ansible directories
- file:
- path={{ item }}
- state=absent
- recurse=no
- with_items:
- - "{{OSA_PATH}}"
- - "{{OSA_ETC_PATH}}"
- - name: clone openstack-ansible
- git:
- repo: "{{OSA_URL}}"
- dest: "{{OSA_PATH}}"
- version: "{{OPENSTACK_OSA_VERSION}}"
- - name: copy opnfv-setup-openstack.yml to /opt/openstack-ansible/playbooks
- copy:
- src: ../file/opnfv-setup-openstack.yml
- dest: "{{OSA_PATH}}/playbooks/opnfv-setup-openstack.yml"
- - name: copy /opt/openstack-ansible/etc/openstack_deploy to /etc/openstack_deploy
- shell: "/bin/cp -rf {{OSA_PATH}}/etc/openstack_deploy {{OSA_ETC_PATH}}"
- - name: bootstrap
- command: "/bin/bash ./scripts/bootstrap-ansible.sh"
- args:
- chdir: "{{OSA_PATH}}"
- - name: generate password token
- command: "python pw-token-gen.py --file /etc/openstack_deploy/user_secrets.yml"
- args:
- chdir: /opt/openstack-ansible/scripts/
- - name: copy openstack_user_config.yml to /etc/openstack_deploy
- copy:
- src: ../file/openstack_user_config.yml
- dest: "{{OSA_ETC_PATH}}/openstack_user_config.yml"
- - name: copy cinder.yml to /etc/openstack_deploy/env.d
- copy:
- src: ../file/cinder.yml
- dest: "{{OSA_ETC_PATH}}/env.d/cinder.yml"
- - name: copy user_variables.yml to /etc/openstack_deploy/
- copy:
- src: ../file/user_variables.yml
- dest: "{{OSA_ETC_PATH}}/user_variables.yml"
- - name: configure network
- template:
- src: ../template/bifrost/controller.interface.j2
- dest: /etc/network/interfaces
- notify:
- - restart network service
- handlers:
- - name: restart network service
-      shell: "/sbin/ifconfig ens3 0 && /sbin/ifdown -a && /sbin/ifup -a"
-
-- hosts: localhost
- remote_user: root
- tasks:
- - name: Generate authorized_keys
- shell: "/bin/cat /xcimaster/root/.ssh/id_rsa.pub >> ../file/authorized_keys"
diff --git a/openstack-ansible/playbooks/inventory b/openstack-ansible/playbooks/inventory
deleted file mode 100644
index d3768f51..00000000
--- a/openstack-ansible/playbooks/inventory
+++ /dev/null
@@ -1,11 +0,0 @@
-[xcimaster]
-xcimaster ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/openstack-ansible/scripts/osa-deploy.sh b/openstack-ansible/scripts/osa-deploy.sh
deleted file mode 100755
index ec607443..00000000
--- a/openstack-ansible/scripts/osa-deploy.sh
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-set -o errexit
-set -o nounset
-set -o pipefail
-
-export OSA_PATH=/opt/openstack-ansible
-export LOG_PATH=$OSA_PATH/log
-export PLAYBOOK_PATH=$OSA_PATH/playbooks
-export OSA_BRANCH=${OSA_BRANCH:-"master"}
-XCIMASTER_IP="192.168.122.2"
-
-sudo /bin/rm -rf $LOG_PATH
-sudo /bin/mkdir -p $LOG_PATH
-sudo /bin/cp /root/.ssh/id_rsa.pub ../file/authorized_keys
-echo -e '\n' | sudo tee --append ../file/authorized_keys
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* Configure XCI Master *"
-echo "* *"
-echo "* Bootstrap xci-master, configure network, clone openstack-ansible *"
-echo "* Playbooks: configure-xcimaster.yml *"
-echo "* *"
-echo "***********************************************************************"
-echo -e "\n"
-
-cd ../playbooks/
-# this will prepare the jump host:
-# clone OpenStack-Ansible, bootstrap Ansible and configure the network
-echo "xci: running ansible playbook configure-xcimaster.yml"
-sudo -E ansible-playbook -i inventory configure-xcimaster.yml
-
-echo "XCI Master is configured successfully!"
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* Configure Nodes *"
-echo "* *"
-echo "* Configure network on OpenStack Nodes, configure NFS *"
-echo "* Playbooks: configure-targethosts.yml *"
-echo "* *"
-echo "***********************************************************************"
-echo -e "\n"
-
-# this will prepare the target hosts,
-# e.g. configure the network and NFS
-echo "xci: running ansible playbook configure-targethosts.yml"
-sudo -E ansible-playbook -i inventory configure-targethosts.yml
-
-echo "Nodes are configured successfully!"
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* Set Up OpenStack Nodes *"
-echo "* *"
-echo "* Set up OpenStack Nodes using openstack-ansible *"
-echo "* Playbooks: setup-hosts.yml, setup-infrastructure.yml *"
-echo "* *"
-echo "***********************************************************************"
-echo -e "\n"
-
-# deploy OpenStack using OpenStack-Ansible
-echo "xci: running ansible playbook setup-hosts.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
- $PLAYBOOK_PATH/setup-hosts.yml" | \
- tee $LOG_PATH/setup-hosts.log
-
-# check the result of openstack-ansible setup-hosts.yml
-# if failed, exit with exit code 1
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-hosts.log; then
- echo "OpenStack node setup failed!"
- exit 1
-fi
-
-echo "xci: running ansible playbook setup-infrastructure.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
- $PLAYBOOK_PATH/setup-infrastructure.yml" | \
- tee $LOG_PATH/setup-infrastructure.log
-
-# check the result of openstack-ansible setup-infrastructure.yml
-# if failed, exit with exit code 1
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/setup-infrastructure.log; then
- echo "OpenStack node setup failed!"
- exit 1
-fi
-
-echo "OpenStack nodes are setup successfully!"
-
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP ansible -i $PLAYBOOK_PATH/inventory/ \
- galera_container -m shell \
- -a "mysql -h localhost -e 'show status like \"%wsrep_cluster_%\";'"" \
- | tee $LOG_PATH/galera.log
-
-if grep -q 'FAILED' $LOG_PATH/galera.log; then
- echo "Database cluster verification failed!"
- exit 1
-else
- echo "Database cluster verification successful!"
-fi
-
-# log some info
-echo -e "\n"
-echo "***********************************************************************"
-echo "* *"
-echo "* Install OpenStack *"
-echo "* Playbooks: opnfv-setup-openstack.yml *"
-echo "* *"
-echo "***********************************************************************"
-echo -e "\n"
-
-echo "xci: running ansible playbook opnfv-setup-openstack.yml"
-sudo -E /bin/sh -c "ssh root@$XCIMASTER_IP openstack-ansible \
- $PLAYBOOK_PATH/opnfv-setup-openstack.yml" | \
- tee $LOG_PATH/opnfv-setup-openstack.log
-
-if grep -q 'failed=1\|unreachable=1' $LOG_PATH/opnfv-setup-openstack.log; then
- echo "OpenStack installation failed!"
- exit 1
-else
- echo "OpenStack installation is successfully completed!"
- exit 0
-fi
diff --git a/openstack-ansible/template/bifrost/compute.interface.j2 b/openstack-ansible/template/bifrost/compute.interface.j2
deleted file mode 100644
index 1719f6a0..00000000
--- a/openstack-ansible/template/bifrost/compute.interface.j2
+++ /dev/null
@@ -1,86 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-
-# Physical interface
-auto ens3
-iface ens3 inet manual
-
-# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
- vlan-raw-device ens3
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
- vlan-raw-device ens3
-
-# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
- vlan-raw-device ens3
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-auto br-vxlan
-iface br-vxlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.30
- address {{host_info[inventory_hostname].VXLAN_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- offload-sg off
- # Create veth pair, don't bomb if already exists
- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
- # Set both ends UP
- pre-up ip link set br-vlan-veth up
- pre-up ip link set eth12 up
- # Delete veth pair on DOWN
- post-down ip link del br-vlan-veth || true
- bridge_ports br-vlan-veth
-
-# Add an additional address to br-vlan
-iface br-vlan inet static
- # Flat network default gateway
- # -- This needs to exist somewhere for network reachability
- # -- from the router namespace for floating IP paths.
- # -- Putting this here is primarily for tempest to work.
- address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
- netmask 255.255.252.0
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/openstack-ansible/template/bifrost/controller.interface.j2 b/openstack-ansible/template/bifrost/controller.interface.j2
deleted file mode 100644
index 74aeea99..00000000
--- a/openstack-ansible/template/bifrost/controller.interface.j2
+++ /dev/null
@@ -1,71 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# Physical interface
-auto ens3
-iface ens3 inet manual
-
-# Container/Host management VLAN interface
-auto ens3.10
-iface ens3.10 inet manual
- vlan-raw-device ens3
-
-# OpenStack Networking VXLAN (tunnel/overlay) VLAN interface
-auto ens3.30
-iface ens3.30 inet manual
- vlan-raw-device ens3
-
-# Storage network VLAN interface (optional)
-auto ens3.20
-iface ens3.20 inet manual
- vlan-raw-device ens3
-
-# Container/Host management bridge
-auto br-mgmt
-iface br-mgmt inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.10
- address {{host_info[inventory_hostname].MGMT_IP}}
- netmask 255.255.252.0
-
-# OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
-auto br-vxlan
-iface br-vxlan inet manual
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.30
-
-# OpenStack Networking VLAN bridge
-auto br-vlan
-iface br-vlan inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3
- address {{host_info[inventory_hostname].VLAN_IP}}
- netmask 255.255.255.0
- gateway 192.168.122.1
- dns-nameserver 8.8.8.8 8.8.4.4
-
-# Storage bridge
-auto br-storage
-iface br-storage inet static
- bridge_stp off
- bridge_waitport 0
- bridge_fd 0
- bridge_ports ens3.20
- address {{host_info[inventory_hostname].STORAGE_IP}}
- netmask 255.255.252.0
diff --git a/openstack-ansible/var/ubuntu.yml b/openstack-ansible/var/ubuntu.yml
deleted file mode 100644
index eb595bea..00000000
--- a/openstack-ansible/var/ubuntu.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-OSA_URL: https://git.openstack.org/openstack/openstack-ansible
-OSA_PATH: /opt/openstack-ansible
-OSA_ETC_PATH: /etc/openstack_deploy
-OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
-
-XCIMASTER_IP: 192.168.122.2
-host_info: {'xcimaster':{'MGMT_IP': '172.29.236.10','VLAN_IP': '192.168.122.2', 'STORAGE_IP': '172.29.244.10'},'controller00':{'MGMT_IP': '172.29.236.11','VLAN_IP': '192.168.122.3', 'STORAGE_IP': '172.29.244.11'},'controller01':{'MGMT_IP': '172.29.236.12','VLAN_IP': '192.168.122.4', 'STORAGE_IP': '172.29.244.12'},'controller02':{'MGMT_IP': '172.29.236.13','VLAN_IP': '192.168.122.5', 'STORAGE_IP': '172.29.244.13'},'compute00':{'MGMT_IP': '172.29.236.14','VLAN_IP': '192.168.122.6','VLAN_IP_SECOND': '173.29.241.1','VXLAN_IP': '172.29.240.14', 'STORAGE_IP': '172.29.244.14'},'compute01':{'MGMT_IP': '172.29.236.15','VLAN_IP': '192.168.122.7','VLAN_IP_SECOND': '173.29.241.2','VXLAN_IP': '172.29.240.15', 'STORAGE_IP': '172.29.244.15'}}