author    wutianwei <wutianwei1@huawei.com>  2018-01-02 16:49:48 +0800
committer wutianwei <wutianwei1@huawei.com>  2018-01-03 10:48:56 +0800
commit    b7c67c901075fc44a087f38e6b11fdac0b1d04d8 (patch)
tree      e2bd1e1e55241e7a9b009aaae421fa6046d69f82 /xci/file
parent    b6907b858e215f358a16216a5095c9c6ed3520c4 (diff)
[XCI] move osa files into xci/nfvi/osa/files
We may introduce other NFVIs into XCI in the future, so the NFVI-specific files should live under the corresponding directory xci/nfvi/$NFVI/files; otherwise the files directory becomes confusing.

Change-Id: Iea98167ff0bc8d338a94fe1c064ac0ab396c53d3
Signed-off-by: wutianwei <wutianwei1@huawei.com>
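As a rough sketch of the relocation this commit performs (the target layout xci/nfvi/osa/files is taken from the commit subject; the exact commands below are assumed for illustration, not part of the commit):

    # hypothetical reproduction of the move for the OSA flavor files and helper script
    mkdir -p xci/nfvi/osa/files
    git mv xci/file/aio xci/file/ha xci/file/mini xci/file/noha xci/nfvi/osa/files/
    git mv xci/file/install-ansible.sh xci/nfvi/osa/files/

Any references to the old xci/file/... paths in playbooks and scripts would need to be updated to the new location as part of the same change.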
Diffstat (limited to 'xci/file')
-rw-r--r--  xci/file/aio/flavor-vars.yml | 3
-rw-r--r--  xci/file/aio/inventory | 2
-rw-r--r--  xci/file/ha/ceph.yml | 15
-rw-r--r--  xci/file/ha/flavor-vars.yml | 39
-rw-r--r--  xci/file/ha/inventory | 11
-rw-r--r--  xci/file/ha/openstack_user_config.yml | 255
-rw-r--r--  xci/file/ha/user_ceph.yml | 16
-rw-r--r--  xci/file/ha/user_variables.yml | 165
-rw-r--r--  xci/file/ha/user_variables_ceph.yml | 32
-rw-r--r--  xci/file/install-ansible.sh | 161
-rw-r--r--  xci/file/mini/ceph.yml | 9
-rw-r--r--  xci/file/mini/flavor-vars.yml | 21
-rw-r--r--  xci/file/mini/inventory | 8
-rw-r--r--  xci/file/mini/openstack_user_config.yml | 170
-rw-r--r--  xci/file/mini/user_ceph.yml | 16
-rw-r--r--  xci/file/mini/user_variables.yml | 165
-rw-r--r--  xci/file/mini/user_variables_ceph.yml | 32
-rw-r--r--  xci/file/noha/ceph.yml | 11
-rw-r--r--  xci/file/noha/flavor-vars.yml | 27
-rw-r--r--  xci/file/noha/inventory | 9
-rw-r--r--  xci/file/noha/openstack_user_config.yml | 172
-rw-r--r--  xci/file/noha/user_ceph.yml | 16
-rw-r--r--  xci/file/noha/user_variables.yml | 165
-rw-r--r--  xci/file/noha/user_variables_ceph.yml | 32
24 files changed, 0 insertions, 1552 deletions
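Because this view is limited to 'xci/file', only the deletions are shown; the matching additions under xci/nfvi/osa/files are presumably part of the same commit. To inspect both sides of the move locally, standard git commands can be used (the path in the second command is assumed from the commit subject):

    # full commit stat with rename detection, not restricted to xci/file
    git show --stat -M b7c67c901075fc44a087f38e6b11fdac0b1d04d8
    # follow one relocated file across the move
    git log --follow --oneline -- xci/nfvi/osa/files/ha/openstack_user_config.yml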
diff --git a/xci/file/aio/flavor-vars.yml b/xci/file/aio/flavor-vars.yml
deleted file mode 100644
index 6ac1e0fe..00000000
--- a/xci/file/aio/flavor-vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# this file is added intentionally in order to simplify putting files in place
-# in future, it might contain vars specific to this flavor
diff --git a/xci/file/aio/inventory b/xci/file/aio/inventory
deleted file mode 100644
index 9a3dd9ee..00000000
--- a/xci/file/aio/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/file/ha/ceph.yml b/xci/file/ha/ceph.yml
deleted file mode 100644
index 1567c492..00000000
--- a/xci/file/ha/ceph.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-# The infra nodes where the Ceph mon services will run
-ceph-mon_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# The nodes that the Ceph OSD disks will be running on
-ceph-osd_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
diff --git a/xci/file/ha/flavor-vars.yml b/xci/file/ha/flavor-vars.yml
deleted file mode 100644
index 167502c9..00000000
--- a/xci/file/ha/flavor-vars.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'controller01': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'controller02': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.6',
- 'MGMT_IP': '172.29.236.14',
- 'VXLAN_IP': '172.29.240.14',
- 'STORAGE_IP': '172.29.244.14'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.7',
- 'MGMT_IP': '172.29.236.15',
- 'VXLAN_IP': '172.29.240.15',
- 'STORAGE_IP': '172.29.244.15'
- }
-}
diff --git a/xci/file/ha/inventory b/xci/file/ha/inventory
deleted file mode 100644
index 94b1d074..00000000
--- a/xci/file/ha/inventory
+++ /dev/null
@@ -1,11 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-controller01 ansible_ssh_host=192.168.122.4
-controller02 ansible_ssh_host=192.168.122.5
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.6
-compute01 ansible_ssh_host=192.168.122.7
diff --git a/xci/file/ha/openstack_user_config.yml b/xci/file/ha/openstack_user_config.yml
deleted file mode 100644
index 360aa5cb..00000000
--- a/xci/file/ha/openstack_user_config.yml
+++ /dev/null
@@ -1,255 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
- - "172.29.236.222"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.222
- external_lb_vip_address: 192.168.122.220
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller01:
- ip: 172.29.236.12
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
- controller02:
- ip: 172.29.236.13
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.14"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
- controller01:
- ip: 172.29.236.12
- controller02:
- ip: 172.29.236.13
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.14
- compute01:
- ip: 172.29.236.15
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller01:
- ip: 172.29.236.12
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
- controller02:
- ip: 172.29.236.13
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.14"
- share: "/volumes"
diff --git a/xci/file/ha/user_ceph.yml b/xci/file/ha/user_ceph.yml
deleted file mode 100644
index 9d5f13a9..00000000
--- a/xci/file/ha/user_ceph.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# The OSA ceph_client role does not support loading IPs from an inventory group,
-# so we have to feed it a list of IPs
-# yamllint disable rule:line-length
-ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
-# yamllint enable rule:line-length
-cinder_backends:
- "RBD":
- volume_driver: cinder.volume.drivers.rbd.RBDDriver
- rbd_pool: volumes
- rbd_ceph_conf: /etc/ceph/ceph.conf
- rbd_store_chunk_size: 8
- volume_backend_name: rbddriver
- rbd_user: cinder
- rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
- report_discard_supported: true
diff --git a/xci/file/ha/user_variables.yml b/xci/file/ha/user_variables.yml
deleted file mode 100644
index 72960a01..00000000
--- a/xci/file/ha/user_variables.yml
+++ /dev/null
@@ -1,165 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-# Allow root logins
-security_sshd_permit_root_login: yes
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-# The settings below are taken from aio to ensure we can bump OSA SHA with current
-# RAM allocation. Higher values will be tested once the bump is done.
-# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
-
-## Galera settings
-galera_innodb_buffer_pool_size: 16M
-galera_innodb_log_buffer_size: 4M
-galera_wsrep_provider_options:
- - { option: "gcache.size", value: "4M" }
-
-## Neutron settings
-neutron_metadata_checksum_fix: True
-
-### Set workers for all services to optimise memory usage
-
-## Repo
-repo_nginx_threads: 2
-
-## Keystone
-keystone_httpd_mpm_start_servers: 2
-keystone_httpd_mpm_min_spare_threads: 1
-keystone_httpd_mpm_max_spare_threads: 2
-keystone_httpd_mpm_thread_limit: 2
-keystone_httpd_mpm_thread_child: 1
-keystone_wsgi_threads: 1
-keystone_wsgi_processes_max: 2
-
-## Barbican
-barbican_wsgi_processes: 2
-barbican_wsgi_threads: 1
-
-## Cinder
-cinder_wsgi_processes_max: 2
-cinder_wsgi_threads: 1
-cinder_wsgi_buffer_size: 16384
-cinder_osapi_volume_workers_max: 2
-
-## Glance
-glance_api_threads_max: 2
-glance_api_threads: 1
-glance_api_workers: 1
-glance_registry_workers: 1
-
-## Nova
-nova_wsgi_threads: 1
-nova_wsgi_processes_max: 2
-nova_wsgi_processes: 2
-nova_wsgi_buffer_size: 16384
-nova_api_threads_max: 2
-nova_api_threads: 1
-nova_osapi_compute_workers: 1
-nova_conductor_workers: 1
-nova_metadata_workers: 1
-
-## Neutron
-neutron_rpc_workers: 1
-neutron_metadata_workers: 1
-neutron_api_workers: 1
-neutron_api_threads_max: 2
-neutron_api_threads: 2
-neutron_num_sync_threads: 1
-
-## Heat
-heat_api_workers: 1
-heat_api_threads_max: 2
-heat_api_threads: 1
-heat_wsgi_threads: 1
-heat_wsgi_processes_max: 2
-heat_wsgi_processes: 1
-heat_wsgi_buffer_size: 16384
-
-## Horizon
-horizon_wsgi_processes: 1
-horizon_wsgi_threads: 1
-horizon_wsgi_threads_max: 2
-
-## Ceilometer
-ceilometer_notification_workers_max: 2
-ceilometer_notification_workers: 1
-
-## AODH
-aodh_wsgi_threads: 1
-aodh_wsgi_processes_max: 2
-aodh_wsgi_processes: 1
-
-## Gnocchi
-gnocchi_wsgi_threads: 1
-gnocchi_wsgi_processes_max: 2
-gnocchi_wsgi_processes: 1
-
-## Swift
-swift_account_server_replicator_workers: 1
-swift_server_replicator_workers: 1
-swift_object_replicator_workers: 1
-swift_account_server_workers: 1
-swift_container_server_workers: 1
-swift_object_server_workers: 1
-swift_proxy_server_workers_max: 2
-swift_proxy_server_workers_not_capped: 1
-swift_proxy_server_workers_capped: 1
-swift_proxy_server_workers: 1
-
-## Ironic
-ironic_wsgi_threads: 1
-ironic_wsgi_processes_max: 2
-ironic_wsgi_processes: 1
-
-## Trove
-trove_api_workers_max: 2
-trove_api_workers: 1
-trove_conductor_workers_max: 2
-trove_conductor_workers: 1
-trove_wsgi_threads: 1
-trove_wsgi_processes_max: 2
-trove_wsgi_processes: 1
-
-## Sahara
-sahara_api_workers_max: 2
-sahara_api_workers: 1
-
-openrc_os_auth_url: "https://192.168.122.220:5000/v3"
-keystone_auth_admin_password: "opnfv-secret-password"
-openrc_os_password: "opnfv-secret-password"
-openrc_os_domain_name: "Default"
-openrc_cinder_endpoint_type: "publicURL"
-openrc_nova_endpoint_type: "publicURL"
-openrc_os_endpoint_type: "publicURL"
-openrc_clouds_yml_interface: "public"
-openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
-keystone_service_adminuri_insecure: true
-keystone_service_internaluri_insecure: true
diff --git a/xci/file/ha/user_variables_ceph.yml b/xci/file/ha/user_variables_ceph.yml
deleted file mode 100644
index 8f708990..00000000
--- a/xci/file/ha/user_variables_ceph.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Copyright 2017, Logan Vig <logan2211@gmail.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## ceph-ansible settings
-devices: [/dev/loop0, /dev/loop1, /dev/loop2]
-common_single_host_mode: true
-monitor_interface: eth1
-public_network: "172.29.236.0/22"
-cluster_network: "172.29.244.0/22"
-journal_size: 100
-journal_collocation: true
-pool_default_pg_num: 32
-openstack_config: true # Ceph ansible automatically creates pools & keys
-cinder_ceph_client: cinder
-cinder_default_volume_type: RBD
-glance_ceph_client: glance
-glance_default_store: rbd
-glance_rbd_store_pool: images
-nova_libvirt_images_rbd_pool: vms
-nfs_file_gw: False
diff --git a/xci/file/install-ansible.sh b/xci/file/install-ansible.sh
deleted file mode 100644
index 979d9904..00000000
--- a/xci/file/install-ansible.sh
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/bin/bash
-# NOTE(hwoarang): Most parts of this this file were taken from the
-# bifrost repository (scripts/install-deps.sh). This script contains all
-# the necessary distro specific code to install ansible and it's dependencies.
-
-set -eu
-
-declare -A PKG_MAP
-
-# workaround: for latest bindep to work, it needs to use en_US local
-export LANG=c
-
-CHECK_CMD_PKGS=(
- gcc
- libffi
- libopenssl
- lsb-release
- make
- net-tools
- python-devel
- python
- venv
- wget
-)
-
-source /etc/os-release || source /usr/lib/os-release
-case ${ID,,} in
- *suse)
- OS_FAMILY="Suse"
- INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
- CHECK_CMD="zypper search --match-exact --installed"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-devel
- [libopenssl]=libopenssl-devel
- [lsb-release]=lsb-release
- [make]=make
- [net-tools]=net-tools
- [python]=python
- [python-devel]=python-devel
- [venv]=python-virtualenv
- [wget]=wget
- )
- EXTRA_PKG_DEPS=( python-xml )
- sudo zypper -n ref
- # NOTE (cinerama): we can't install python without removing this package
- # if it exists
- if $(${CHECK_CMD} patterns-openSUSE-minimal_base-conflicts &> /dev/null); then
- sudo -H zypper remove -y patterns-openSUSE-minimal_base-conflicts
- fi
- ;;
-
- ubuntu|debian)
- OS_FAMILY="Debian"
- export DEBIAN_FRONTEND=noninteractive
- INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
- CHECK_CMD="dpkg -l"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-dev
- [libopenssl]=libssl-dev
- [lsb-release]=lsb-release
- [make]=make
- [net-tools]=net-tools
- [python]=python-minimal
- [python-devel]=libpython-dev
- [venv]=python-virtualenv
- [wget]=wget
- )
- EXTRA_PKG_DEPS=()
- sudo apt-get update
- ;;
-
- rhel|fedora|centos)
- OS_FAMILY="RedHat"
- PKG_MANAGER=$(which dnf || which yum)
- INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
- CHECK_CMD="rpm -q"
- PKG_MAP=(
- [gcc]=gcc
- [libffi]=libffi-devel
- [libopenssl]=openssl-devel
- [lsb-release]=redhat-lsb
- [make]=make
- [net-tools]=net-tools
- [python]=python
- [python-devel]=python-devel
- [venv]=python-virtualenv
- [wget]=wget
- )
- sudo yum updateinfo
- EXTRA_PKG_DEPS=()
- ;;
-
- *) echo "ERROR: Supported package manager not found. Supported: apt, dnf, yum, zypper"; exit 1;;
-esac
-
-if ! $(python --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[python]}
-fi
-if ! $(gcc -v &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[gcc]}
-fi
-if ! $(wget --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[wget]}
-fi
-
-if ! $(python -m virtualenv --version &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[venv]}
-fi
-
-for pkg in ${CHECK_CMD_PKGS[@]}; do
- if ! $(${CHECK_CMD} ${PKG_MAP[$pkg]} &>/dev/null); then
- ${INSTALLER_CMD} ${PKG_MAP[$pkg]}
- fi
-done
-
-if [ -n "${EXTRA_PKG_DEPS-}" ]; then
- for pkg in ${EXTRA_PKG_DEPS}; do
- if ! $(${CHECK_CMD} ${pkg} &>/dev/null); then
- ${INSTALLER_CMD} ${pkg}
- fi
- done
-fi
-
-# If we're using a venv, we need to work around sudo not
-# keeping the path even with -E.
-PYTHON=$(which python)
-
-# To install python packages, we need pip.
-#
-# We can't use the apt packaged version of pip since
-# older versions of pip are incompatible with
-# requests, one of our indirect dependencies (bug 1459947).
-#
-# Note(cinerama): We use pip to install an updated pip plus our
-# other python requirements. pip breakages can seriously impact us,
-# so we've chosen to install/upgrade pip here rather than in
-# requirements (which are synced automatically from the global ones)
-# so we can quickly and easily adjust version parameters.
-# See bug 1536627.
-#
-# Note(cinerama): If pip is linked to pip3, the rest of the install
-# won't work. Remove the alternatives. This is due to ansible's
-# python 2.x requirement.
-if [[ $(readlink -f /etc/alternatives/pip) =~ "pip3" ]]; then
- sudo -H update-alternatives --remove pip $(readlink -f /etc/alternatives/pip)
-fi
-
-if ! which pip; then
- wget -O /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
- sudo -H -E ${PYTHON} /tmp/get-pip.py
-fi
-
-PIP=$(which pip)
-echo "Using pip: $(${PIP} --version)"
-sudo -H -E ${PIP} -q install --upgrade virtualenv
-sudo -H -E ${PIP} -q install --upgrade pip
-# upgrade setuptools, as latest version is needed to install some projects
-sudo -H -E ${PIP} -q install --upgrade setuptools
-${PIP} install -q --user --upgrade ansible==$XCI_ANSIBLE_PIP_VERSION
diff --git a/xci/file/mini/ceph.yml b/xci/file/mini/ceph.yml
deleted file mode 100644
index 5c09b471..00000000
--- a/xci/file/mini/ceph.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-# The infra nodes where the Ceph mon services will run
-ceph-mon_hosts:
- controller00:
- ip: 172.29.236.11
-
-# The nodes that the Ceph OSD disks will be running on
-ceph-osd_hosts:
- compute00:
- ip: 172.29.236.12
diff --git a/xci/file/mini/flavor-vars.yml b/xci/file/mini/flavor-vars.yml
deleted file mode 100644
index 0d446ba2..00000000
--- a/xci/file/mini/flavor-vars.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
-}
diff --git a/xci/file/mini/inventory b/xci/file/mini/inventory
deleted file mode 100644
index eb73e5e3..00000000
--- a/xci/file/mini/inventory
+++ /dev/null
@@ -1,8 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
diff --git a/xci/file/mini/openstack_user_config.yml b/xci/file/mini/openstack_user_config.yml
deleted file mode 100644
index f9ccee24..00000000
--- a/xci/file/mini/openstack_user_config.yml
+++ /dev/null
@@ -1,170 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/file/mini/user_ceph.yml b/xci/file/mini/user_ceph.yml
deleted file mode 100644
index 9d5f13a9..00000000
--- a/xci/file/mini/user_ceph.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# The OSA ceph_client role does not support loading IPs from an inventory group,
-# so we have to feed it a list of IPs
-# yamllint disable rule:line-length
-ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
-# yamllint enable rule:line-length
-cinder_backends:
- "RBD":
- volume_driver: cinder.volume.drivers.rbd.RBDDriver
- rbd_pool: volumes
- rbd_ceph_conf: /etc/ceph/ceph.conf
- rbd_store_chunk_size: 8
- volume_backend_name: rbddriver
- rbd_user: cinder
- rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
- report_discard_supported: true
diff --git a/xci/file/mini/user_variables.yml b/xci/file/mini/user_variables.yml
deleted file mode 100644
index 9ec9e405..00000000
--- a/xci/file/mini/user_variables.yml
+++ /dev/null
@@ -1,165 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-# Allow root logins
-security_sshd_permit_root_login: yes
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-# The settings below are taken from aio since this flavor is mostly
-# for short CI loops and users with lower requirements.
-# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
-
-## Galera settings
-galera_innodb_buffer_pool_size: 16M
-galera_innodb_log_buffer_size: 4M
-galera_wsrep_provider_options:
- - { option: "gcache.size", value: "4M" }
-
-## Neutron settings
-neutron_metadata_checksum_fix: True
-
-### Set workers for all services to optimise memory usage
-
-## Repo
-repo_nginx_threads: 2
-
-## Keystone
-keystone_httpd_mpm_start_servers: 2
-keystone_httpd_mpm_min_spare_threads: 1
-keystone_httpd_mpm_max_spare_threads: 2
-keystone_httpd_mpm_thread_limit: 2
-keystone_httpd_mpm_thread_child: 1
-keystone_wsgi_threads: 1
-keystone_wsgi_processes_max: 2
-
-## Barbican
-barbican_wsgi_processes: 2
-barbican_wsgi_threads: 1
-
-## Cinder
-cinder_wsgi_processes_max: 2
-cinder_wsgi_threads: 1
-cinder_wsgi_buffer_size: 16384
-cinder_osapi_volume_workers_max: 2
-
-## Glance
-glance_api_threads_max: 2
-glance_api_threads: 1
-glance_api_workers: 1
-glance_registry_workers: 1
-
-## Nova
-nova_wsgi_threads: 1
-nova_wsgi_processes_max: 2
-nova_wsgi_processes: 2
-nova_wsgi_buffer_size: 16384
-nova_api_threads_max: 2
-nova_api_threads: 1
-nova_osapi_compute_workers: 1
-nova_conductor_workers: 1
-nova_metadata_workers: 1
-
-## Neutron
-neutron_rpc_workers: 1
-neutron_metadata_workers: 1
-neutron_api_workers: 1
-neutron_api_threads_max: 2
-neutron_api_threads: 2
-neutron_num_sync_threads: 1
-
-## Heat
-heat_api_workers: 1
-heat_api_threads_max: 2
-heat_api_threads: 1
-heat_wsgi_threads: 1
-heat_wsgi_processes_max: 2
-heat_wsgi_processes: 1
-heat_wsgi_buffer_size: 16384
-
-## Horizon
-horizon_wsgi_processes: 1
-horizon_wsgi_threads: 1
-horizon_wsgi_threads_max: 2
-
-## Ceilometer
-ceilometer_notification_workers_max: 2
-ceilometer_notification_workers: 1
-
-## AODH
-aodh_wsgi_threads: 1
-aodh_wsgi_processes_max: 2
-aodh_wsgi_processes: 1
-
-## Gnocchi
-gnocchi_wsgi_threads: 1
-gnocchi_wsgi_processes_max: 2
-gnocchi_wsgi_processes: 1
-
-## Swift
-swift_account_server_replicator_workers: 1
-swift_server_replicator_workers: 1
-swift_object_replicator_workers: 1
-swift_account_server_workers: 1
-swift_container_server_workers: 1
-swift_object_server_workers: 1
-swift_proxy_server_workers_max: 2
-swift_proxy_server_workers_not_capped: 1
-swift_proxy_server_workers_capped: 1
-swift_proxy_server_workers: 1
-
-## Ironic
-ironic_wsgi_threads: 1
-ironic_wsgi_processes_max: 2
-ironic_wsgi_processes: 1
-
-## Trove
-trove_api_workers_max: 2
-trove_api_workers: 1
-trove_conductor_workers_max: 2
-trove_conductor_workers: 1
-trove_wsgi_threads: 1
-trove_wsgi_processes_max: 2
-trove_wsgi_processes: 1
-
-## Sahara
-sahara_api_workers_max: 2
-sahara_api_workers: 1
-
-openrc_os_auth_url: "https://192.168.122.3:5000/v3"
-keystone_auth_admin_password: "opnfv-secret-password"
-openrc_os_password: "opnfv-secret-password"
-openrc_os_domain_name: "Default"
-openrc_cinder_endpoint_type: "publicURL"
-openrc_nova_endpoint_type: "publicURL"
-openrc_os_endpoint_type: "publicURL"
-openrc_clouds_yml_interface: "public"
-openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
-keystone_service_adminuri_insecure: true
-keystone_service_internaluri_insecure: true
diff --git a/xci/file/mini/user_variables_ceph.yml b/xci/file/mini/user_variables_ceph.yml
deleted file mode 100644
index 8f708990..00000000
--- a/xci/file/mini/user_variables_ceph.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Copyright 2017, Logan Vig <logan2211@gmail.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## ceph-ansible settings
-devices: [/dev/loop0, /dev/loop1, /dev/loop2]
-common_single_host_mode: true
-monitor_interface: eth1
-public_network: "172.29.236.0/22"
-cluster_network: "172.29.244.0/22"
-journal_size: 100
-journal_collocation: true
-pool_default_pg_num: 32
-openstack_config: true # Ceph ansible automatically creates pools & keys
-cinder_ceph_client: cinder
-cinder_default_volume_type: RBD
-glance_ceph_client: glance
-glance_default_store: rbd
-glance_rbd_store_pool: images
-nova_libvirt_images_rbd_pool: vms
-nfs_file_gw: False
diff --git a/xci/file/noha/ceph.yml b/xci/file/noha/ceph.yml
deleted file mode 100644
index 0deb522e..00000000
--- a/xci/file/noha/ceph.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-# The infra nodes where the Ceph mon services will run
-ceph-mon_hosts:
- controller00:
- ip: 172.29.236.11
-
-# The nodes that the Ceph OSD disks will be running on
-ceph-osd_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
diff --git a/xci/file/noha/flavor-vars.yml b/xci/file/noha/flavor-vars.yml
deleted file mode 100644
index 3c69a34b..00000000
--- a/xci/file/noha/flavor-vars.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-host_info: {
- 'opnfv': {
- 'VLAN_IP': '192.168.122.2',
- 'MGMT_IP': '172.29.236.10',
- 'VXLAN_IP': '172.29.240.10',
- 'STORAGE_IP': '172.29.244.10'
- },
- 'controller00': {
- 'VLAN_IP': '192.168.122.3',
- 'MGMT_IP': '172.29.236.11',
- 'VXLAN_IP': '172.29.240.11',
- 'STORAGE_IP': '172.29.244.11'
- },
- 'compute00': {
- 'VLAN_IP': '192.168.122.4',
- 'MGMT_IP': '172.29.236.12',
- 'VXLAN_IP': '172.29.240.12',
- 'STORAGE_IP': '172.29.244.12'
- },
- 'compute01': {
- 'VLAN_IP': '192.168.122.5',
- 'MGMT_IP': '172.29.236.13',
- 'VXLAN_IP': '172.29.240.13',
- 'STORAGE_IP': '172.29.244.13'
- }
-}
diff --git a/xci/file/noha/inventory b/xci/file/noha/inventory
deleted file mode 100644
index b4f9f6d0..00000000
--- a/xci/file/noha/inventory
+++ /dev/null
@@ -1,9 +0,0 @@
-[opnfv]
-opnfv ansible_ssh_host=192.168.122.2
-
-[controller]
-controller00 ansible_ssh_host=192.168.122.3
-
-[compute]
-compute00 ansible_ssh_host=192.168.122.4
-compute01 ansible_ssh_host=192.168.122.5
diff --git a/xci/file/noha/openstack_user_config.yml b/xci/file/noha/openstack_user_config.yml
deleted file mode 100644
index fb12655e..00000000
--- a/xci/file/noha/openstack_user_config.yml
+++ /dev/null
@@ -1,172 +0,0 @@
----
-cidr_networks:
- container: 172.29.236.0/22
- tunnel: 172.29.240.0/22
- storage: 172.29.244.0/22
-
-used_ips:
- - "172.29.236.1,172.29.236.50"
- - "172.29.240.1,172.29.240.50"
- - "172.29.244.1,172.29.244.50"
- - "172.29.248.1,172.29.248.50"
-
-global_overrides:
- internal_lb_vip_address: 172.29.236.11
- external_lb_vip_address: 192.168.122.3
- tunnel_bridge: "br-vxlan"
- management_bridge: "br-mgmt"
- provider_networks:
- - network:
- container_bridge: "br-mgmt"
- container_type: "veth"
- container_interface: "eth1"
- ip_from_q: "container"
- type: "raw"
- group_binds:
- - all_containers
- - hosts
- is_container_address: true
- is_ssh_address: true
- - network:
- container_bridge: "br-vxlan"
- container_type: "veth"
- container_interface: "eth10"
- ip_from_q: "tunnel"
- type: "vxlan"
- range: "1:1000"
- net_name: "vxlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth12"
- host_bind_override: "eth12"
- type: "flat"
- net_name: "flat"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-vlan"
- container_type: "veth"
- container_interface: "eth11"
- type: "vlan"
- range: "1:1"
- net_name: "vlan"
- group_binds:
- - neutron_linuxbridge_agent
- - network:
- container_bridge: "br-storage"
- container_type: "veth"
- container_interface: "eth2"
- ip_from_q: "storage"
- type: "raw"
- group_binds:
- - glance_api
- - cinder_api
- - cinder_volume
- - nova_compute
-
-# ##
-# ## Infrastructure
-# ##
-
-# galera, memcache, rabbitmq, utility
-shared-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# repository (apt cache, python packages, etc)
-repo-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# load balancer
-# Ideally the load balancer should not use the Infrastructure hosts.
-# Dedicated hardware is best for improved performance and security.
-haproxy_hosts:
- controller00:
- ip: 172.29.236.11
-
-# rsyslog server
-# log_hosts:
-# log1:
-# ip: 172.29.236.14
-
-# ##
-# ## OpenStack
-# ##
-
-# keystone
-identity_hosts:
- controller00:
- ip: 172.29.236.11
-
-# cinder api services
-storage-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# glance
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-image_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- limit_container_types: glance
- glance_nfs_client:
- - server: "172.29.244.12"
- remote_path: "/images"
- local_path: "/var/lib/glance/images"
- type: "nfs"
- options: "_netdev,auto"
-
-# nova api, conductor, etc services
-compute-infra_hosts:
- controller00:
- ip: 172.29.236.11
-
-# heat
-orchestration_hosts:
- controller00:
- ip: 172.29.236.11
-
-# horizon
-dashboard_hosts:
- controller00:
- ip: 172.29.236.11
-
-# neutron server, agents (L3, etc)
-network_hosts:
- controller00:
- ip: 172.29.236.11
-
-# nova hypervisors
-compute_hosts:
- compute00:
- ip: 172.29.236.12
- compute01:
- ip: 172.29.236.13
-
-# cinder volume hosts (NFS-backed)
-# The settings here are repeated for each infra host.
-# They could instead be applied as global settings in
-# user_variables, but are left here to illustrate that
-# each container could have different storage targets.
-storage_hosts:
- controller00:
- ip: 172.29.236.11
- container_vars:
- cinder_backends:
- limit_container_types: cinder_volume
- nfs_volume:
- volume_backend_name: NFS_VOLUME1
- volume_driver: cinder.volume.drivers.nfs.NfsDriver
- nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
- nfs_shares_config: /etc/cinder/nfs_shares
- shares:
- - ip: "172.29.244.12"
- share: "/volumes"
diff --git a/xci/file/noha/user_ceph.yml b/xci/file/noha/user_ceph.yml
deleted file mode 100644
index 9d5f13a9..00000000
--- a/xci/file/noha/user_ceph.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# The OSA ceph_client role does not support loading IPs from an inventory group,
-# so we have to feed it a list of IPs
-# yamllint disable rule:line-length
-ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
-# yamllint enable rule:line-length
-cinder_backends:
- "RBD":
- volume_driver: cinder.volume.drivers.rbd.RBDDriver
- rbd_pool: volumes
- rbd_ceph_conf: /etc/ceph/ceph.conf
- rbd_store_chunk_size: 8
- volume_backend_name: rbddriver
- rbd_user: cinder
- rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
- report_discard_supported: true
diff --git a/xci/file/noha/user_variables.yml b/xci/file/noha/user_variables.yml
deleted file mode 100644
index 66573428..00000000
--- a/xci/file/noha/user_variables.yml
+++ /dev/null
@@ -1,165 +0,0 @@
----
-# Copyright 2014, Rackspace US, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ##
-# ## This file contains commonly used overrides for convenience. Please inspect
-# ## the defaults for each role to find additional override options.
-# ##
-
-# # Debug and Verbose options.
-debug: false
-
-# Allow root logins
-security_sshd_permit_root_login: yes
-
-haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
-haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
-haproxy_keepalived_external_interface: br-vlan
-haproxy_keepalived_internal_interface: br-mgmt
-gnocchi_db_sync_options: ""
-
-# The settings below are taken from aio to ensure we can bump OSA SHA with current
-# RAM allocation. Higher values will be tested once the bump is done.
-# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
-
-## Galera settings
-galera_innodb_buffer_pool_size: 16M
-galera_innodb_log_buffer_size: 4M
-galera_wsrep_provider_options:
- - { option: "gcache.size", value: "4M" }
-
-## Neutron settings
-neutron_metadata_checksum_fix: True
-
-### Set workers for all services to optimise memory usage
-
-## Repo
-repo_nginx_threads: 2
-
-## Keystone
-keystone_httpd_mpm_start_servers: 2
-keystone_httpd_mpm_min_spare_threads: 1
-keystone_httpd_mpm_max_spare_threads: 2
-keystone_httpd_mpm_thread_limit: 2
-keystone_httpd_mpm_thread_child: 1
-keystone_wsgi_threads: 1
-keystone_wsgi_processes_max: 2
-
-## Barbican
-barbican_wsgi_processes: 2
-barbican_wsgi_threads: 1
-
-## Cinder
-cinder_wsgi_processes_max: 2
-cinder_wsgi_threads: 1
-cinder_wsgi_buffer_size: 16384
-cinder_osapi_volume_workers_max: 2
-
-## Glance
-glance_api_threads_max: 2
-glance_api_threads: 1
-glance_api_workers: 1
-glance_registry_workers: 1
-
-## Nova
-nova_wsgi_threads: 1
-nova_wsgi_processes_max: 2
-nova_wsgi_processes: 2
-nova_wsgi_buffer_size: 16384
-nova_api_threads_max: 2
-nova_api_threads: 1
-nova_osapi_compute_workers: 1
-nova_conductor_workers: 1
-nova_metadata_workers: 1
-
-## Neutron
-neutron_rpc_workers: 1
-neutron_metadata_workers: 1
-neutron_api_workers: 1
-neutron_api_threads_max: 2
-neutron_api_threads: 2
-neutron_num_sync_threads: 1
-
-## Heat
-heat_api_workers: 1
-heat_api_threads_max: 2
-heat_api_threads: 1
-heat_wsgi_threads: 1
-heat_wsgi_processes_max: 2
-heat_wsgi_processes: 1
-heat_wsgi_buffer_size: 16384
-
-## Horizon
-horizon_wsgi_processes: 1
-horizon_wsgi_threads: 1
-horizon_wsgi_threads_max: 2
-
-## Ceilometer
-ceilometer_notification_workers_max: 2
-ceilometer_notification_workers: 1
-
-## AODH
-aodh_wsgi_threads: 1
-aodh_wsgi_processes_max: 2
-aodh_wsgi_processes: 1
-
-## Gnocchi
-gnocchi_wsgi_threads: 1
-gnocchi_wsgi_processes_max: 2
-gnocchi_wsgi_processes: 1
-
-## Swift
-swift_account_server_replicator_workers: 1
-swift_server_replicator_workers: 1
-swift_object_replicator_workers: 1
-swift_account_server_workers: 1
-swift_container_server_workers: 1
-swift_object_server_workers: 1
-swift_proxy_server_workers_max: 2
-swift_proxy_server_workers_not_capped: 1
-swift_proxy_server_workers_capped: 1
-swift_proxy_server_workers: 1
-
-## Ironic
-ironic_wsgi_threads: 1
-ironic_wsgi_processes_max: 2
-ironic_wsgi_processes: 1
-
-## Trove
-trove_api_workers_max: 2
-trove_api_workers: 1
-trove_conductor_workers_max: 2
-trove_conductor_workers: 1
-trove_wsgi_threads: 1
-trove_wsgi_processes_max: 2
-trove_wsgi_processes: 1
-
-## Sahara
-sahara_api_workers_max: 2
-sahara_api_workers: 1
-
-openrc_os_auth_url: "https://192.168.122.3:5000/v3"
-keystone_auth_admin_password: "opnfv-secret-password"
-openrc_os_password: "opnfv-secret-password"
-openrc_os_domain_name: "Default"
-openrc_cinder_endpoint_type: "publicURL"
-openrc_nova_endpoint_type: "publicURL"
-openrc_os_endpoint_type: "publicURL"
-openrc_clouds_yml_interface: "public"
-openrc_region_name: RegionOne
-haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
-haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
-keystone_service_adminuri_insecure: true
-keystone_service_internaluri_insecure: true
diff --git a/xci/file/noha/user_variables_ceph.yml b/xci/file/noha/user_variables_ceph.yml
deleted file mode 100644
index 8f708990..00000000
--- a/xci/file/noha/user_variables_ceph.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Copyright 2017, Logan Vig <logan2211@gmail.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## ceph-ansible settings
-devices: [/dev/loop0, /dev/loop1, /dev/loop2]
-common_single_host_mode: true
-monitor_interface: eth1
-public_network: "172.29.236.0/22"
-cluster_network: "172.29.244.0/22"
-journal_size: 100
-journal_collocation: true
-pool_default_pg_num: 32
-openstack_config: true # Ceph ansible automatically creates pools & keys
-cinder_ceph_client: cinder
-cinder_default_volume_type: RBD
-glance_ceph_client: glance
-glance_default_store: rbd
-glance_rbd_store_pool: images
-nova_libvirt_images_rbd_pool: vms
-nfs_file_gw: False