Diffstat (limited to 'xci/nfvi/osa/files/ha')
-rw-r--r--  xci/nfvi/osa/files/ha/ceph.yml                   |  15
-rw-r--r--  xci/nfvi/osa/files/ha/flavor-vars.yml            |  39
-rw-r--r--  xci/nfvi/osa/files/ha/inventory                  |  11
-rw-r--r--  xci/nfvi/osa/files/ha/openstack_user_config.yml  | 255
-rw-r--r--  xci/nfvi/osa/files/ha/user_ceph.yml              |  16
-rw-r--r--  xci/nfvi/osa/files/ha/user_variables.yml         | 165
-rw-r--r--  xci/nfvi/osa/files/ha/user_variables_ceph.yml    |  32
7 files changed, 533 insertions(+), 0 deletions(-)
diff --git a/xci/nfvi/osa/files/ha/ceph.yml b/xci/nfvi/osa/files/ha/ceph.yml
new file mode 100644
index 00000000..1567c492
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/ceph.yml
@@ -0,0 +1,15 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# The nodes that the Ceph OSDs (object storage daemons) will run on
+ceph-osd_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
diff --git a/xci/nfvi/osa/files/ha/flavor-vars.yml b/xci/nfvi/osa/files/ha/flavor-vars.yml
new file mode 100644
index 00000000..167502c9
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/flavor-vars.yml
@@ -0,0 +1,39 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'controller01': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'controller02': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.6',
+ 'MGMT_IP': '172.29.236.14',
+ 'VXLAN_IP': '172.29.240.14',
+ 'STORAGE_IP': '172.29.244.14'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.7',
+ 'MGMT_IP': '172.29.236.15',
+ 'VXLAN_IP': '172.29.240.15',
+ 'STORAGE_IP': '172.29.244.15'
+ }
+}
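
The host_info map is keyed by inventory hostname, so per-node addresses can be looked up from the network templates. A minimal Jinja2 sketch of such a lookup (the surrounding template context is an assumption; only host_info itself comes from this file):

    # e.g. inside an interface template rendered per host:
    address {{ host_info[inventory_hostname]['MGMT_IP'] }}
    # VLAN_IP, VXLAN_IP and STORAGE_IP follow the same lookup pattern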
diff --git a/xci/nfvi/osa/files/ha/inventory b/xci/nfvi/osa/files/ha/inventory
new file mode 100644
index 00000000..94b1d074
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/inventory
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
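
With the flat groups above, a minimal ping playbook can confirm every node is reachable before bootstrapping starts; a sketch, assuming SSH keys are already distributed (ping.yml is a hypothetical helper, not part of this change):

    ---
    # ping.yml: run as `ansible-playbook -i inventory ping.yml`
    - hosts: all
      gather_facts: false
      tasks:
        - name: Verify SSH reachability and a usable Python interpreter
          ping: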
diff --git a/xci/nfvi/osa/files/ha/openstack_user_config.yml b/xci/nfvi/osa/files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..360aa5cb
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/openstack_user_config.yml
@@ -0,0 +1,255 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+ - "172.29.236.222"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.222
+ external_lb_vip_address: 192.168.122.220
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# repository (apt cache, Python packages, etc.)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.14"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+ controller01:
+ ip: 172.29.236.12
+ controller02:
+ ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.14
+ compute01:
+ ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller01:
+ ip: 172.29.236.12
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
+ controller02:
+ ip: 172.29.236.13
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.14"
+ share: "/volumes"
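
Given the cinder_backends above, each cinder_volume container should end up with an /etc/cinder/nfs_shares file listing the export in host:/path form; with the values in this file it would contain a single line (a sketch of the rendered file, not captured from a deployment):

    172.29.244.14:/volumes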
diff --git a/xci/nfvi/osa/files/ha/user_ceph.yml b/xci/nfvi/osa/files/ha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it an explicitly rendered list of IPs.
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+ "RBD":
+ volume_driver: cinder.volume.drivers.rbd.RBDDriver
+ rbd_pool: volumes
+ rbd_ceph_conf: /etc/ceph/ceph.conf
+ rbd_store_chunk_size: 8
+ volume_backend_name: rbddriver
+ rbd_user: cinder
+ rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+ report_discard_supported: true
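
Assuming mon_group_name resolves to the ceph-mon hosts defined in ceph.yml and ansible_host carries their management addresses, the Jinja2 loop above should render ceph_mons to a plain list (expected render, not captured output):

    ceph_mons: [ '172.29.236.11','172.29.236.12','172.29.236.13' ]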
diff --git a/xci/nfvi/osa/files/ha/user_variables.yml b/xci/nfvi/osa/files/ha/user_variables.yml
new file mode 100644
index 00000000..72960a01
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from the OSA all-in-one (AIO) configuration to ensure we can
+# bump the OSA SHA with the current RAM allocation. Higher values will be tested once the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+ - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.220:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
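
The openrc_* values above shape the client credentials that OSA generates on the utility container; a sketch of the clouds.yaml entry they imply, assuming the default admin user (layout assumed, not generated by this change):

    clouds:
      default:
        auth:
          auth_url: https://192.168.122.220:5000/v3
          username: admin
          password: opnfv-secret-password
          user_domain_name: Default
          project_domain_name: Default
        interface: public
        region_name: RegionOne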
diff --git a/xci/nfvi/osa/files/ha/user_variables_ceph.yml b/xci/nfvi/osa/files/ha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # ceph-ansible automatically creates the pools and keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
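
The devices list assumes /dev/loop0 through /dev/loop2 already exist and are backed by files large enough for the journal_size above. A hedged Ansible sketch of one way to prepare them before ceph-ansible runs (the task file, sizes, and paths are hypothetical, not part of XCI):

    ---
    # prepare-loop-osds.yml: create sparse backing files and attach them
    - name: Prepare loop devices for the Ceph OSDs
      hosts: compute
      tasks:
        - name: Create backing file and attach loop device
          shell: |
            truncate -s 10G /var/lib/{{ item | basename }}.img
            losetup {{ item }} /var/lib/{{ item | basename }}.img
          args:
            creates: "/var/lib/{{ item | basename }}.img"
          with_items:
            - /dev/loop0
            - /dev/loop1
            - /dev/loop2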