Diffstat (limited to 'xci/installer/osa/files/noha')
-rw-r--r--  xci/installer/osa/files/noha/ceph.yml                     11
-rw-r--r--  xci/installer/osa/files/noha/flavor-vars.yml              27
-rw-r--r--  xci/installer/osa/files/noha/inventory                     9
-rw-r--r--  xci/installer/osa/files/noha/openstack_user_config.yml   172
-rw-r--r--  xci/installer/osa/files/noha/user_ceph.yml                16
-rw-r--r--  xci/installer/osa/files/noha/user_variables.yml          165
-rw-r--r--  xci/installer/osa/files/noha/user_variables_ceph.yml      32
7 files changed, 432 insertions(+), 0 deletions(-)
diff --git a/xci/installer/osa/files/noha/ceph.yml b/xci/installer/osa/files/noha/ceph.yml
new file mode 100644
index 00000000..0deb522e
--- /dev/null
+++ b/xci/installer/osa/files/noha/ceph.yml
@@ -0,0 +1,11 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
diff --git a/xci/installer/osa/files/noha/flavor-vars.yml b/xci/installer/osa/files/noha/flavor-vars.yml
new file mode 100644
index 00000000..3c69a34b
--- /dev/null
+++ b/xci/installer/osa/files/noha/flavor-vars.yml
@@ -0,0 +1,27 @@
+---
+host_info: {
+ 'opnfv': {
+ 'VLAN_IP': '192.168.122.2',
+ 'MGMT_IP': '172.29.236.10',
+ 'VXLAN_IP': '172.29.240.10',
+ 'STORAGE_IP': '172.29.244.10'
+ },
+ 'controller00': {
+ 'VLAN_IP': '192.168.122.3',
+ 'MGMT_IP': '172.29.236.11',
+ 'VXLAN_IP': '172.29.240.11',
+ 'STORAGE_IP': '172.29.244.11'
+ },
+ 'compute00': {
+ 'VLAN_IP': '192.168.122.4',
+ 'MGMT_IP': '172.29.236.12',
+ 'VXLAN_IP': '172.29.240.12',
+ 'STORAGE_IP': '172.29.244.12'
+ },
+ 'compute01': {
+ 'VLAN_IP': '192.168.122.5',
+ 'MGMT_IP': '172.29.236.13',
+ 'VXLAN_IP': '172.29.240.13',
+ 'STORAGE_IP': '172.29.244.13'
+ }
+}
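
The flavor-vars.yml above gives every node one address on each of the management, VXLAN, storage and VLAN networks. Below is a minimal sketch (not part of XCI) that loads the file and checks this invariant; the file path and the key-to-subnet mapping are assumptions drawn from this change and from the cidr_networks defined later in openstack_user_config.yml.

    # Sanity-check flavor-vars.yml: each host defines an address on every network.
    import ipaddress
    import yaml  # pip install pyyaml

    # Assumed key-to-subnet mapping; 192.168.122.0/24 is the libvirt-style admin net.
    NETWORKS = {
        "MGMT_IP": "172.29.236.0/22",
        "VXLAN_IP": "172.29.240.0/22",
        "STORAGE_IP": "172.29.244.0/22",
        "VLAN_IP": "192.168.122.0/24",
    }

    with open("xci/installer/osa/files/noha/flavor-vars.yml") as f:
        host_info = yaml.safe_load(f)["host_info"]

    for host, addrs in host_info.items():
        for key, cidr in NETWORKS.items():
            ip = ipaddress.ip_address(addrs[key])
            assert ip in ipaddress.ip_network(cidr), f"{host}: {key}={ip} not in {cidr}"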
diff --git a/xci/installer/osa/files/noha/inventory b/xci/installer/osa/files/noha/inventory
new file mode 100644
index 00000000..b4f9f6d0
--- /dev/null
+++ b/xci/installer/osa/files/noha/inventory
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
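
The inventory above is a plain static Ansible inventory: one opnfv deployment host, one controller and two computes. The following sketch (not part of XCI) parses it into a {group: {host: ssh_address}} map just to make the structure explicit; the file path is an assumption.

    # Parse the static inventory into {group: {host: ansible_ssh_host}}.
    groups = {}
    current = None
    with open("xci/installer/osa/files/noha/inventory") as f:
        for raw in f:
            line = raw.strip()
            if not line or line.startswith("#"):
                continue
            if line.startswith("[") and line.endswith("]"):
                current = line[1:-1]
                groups[current] = {}
            else:
                host, _, rest = line.partition(" ")
                groups[current][host] = rest.split("=", 1)[1]

    print(groups["compute"])
    # {'compute00': '192.168.122.4', 'compute01': '192.168.122.5'}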
diff --git a/xci/installer/osa/files/noha/openstack_user_config.yml b/xci/installer/osa/files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..fb12655e
--- /dev/null
+++ b/xci/installer/osa/files/noha/openstack_user_config.yml
@@ -0,0 +1,172 @@
+---
+cidr_networks:
+ container: 172.29.236.0/22
+ tunnel: 172.29.240.0/22
+ storage: 172.29.244.0/22
+
+used_ips:
+ - "172.29.236.1,172.29.236.50"
+ - "172.29.240.1,172.29.240.50"
+ - "172.29.244.1,172.29.244.50"
+ - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+ internal_lb_vip_address: 172.29.236.11
+ external_lb_vip_address: 192.168.122.3
+ tunnel_bridge: "br-vxlan"
+ management_bridge: "br-mgmt"
+ provider_networks:
+ - network:
+ container_bridge: "br-mgmt"
+ container_type: "veth"
+ container_interface: "eth1"
+ ip_from_q: "container"
+ type: "raw"
+ group_binds:
+ - all_containers
+ - hosts
+ is_container_address: true
+ is_ssh_address: true
+ - network:
+ container_bridge: "br-vxlan"
+ container_type: "veth"
+ container_interface: "eth10"
+ ip_from_q: "tunnel"
+ type: "vxlan"
+ range: "1:1000"
+ net_name: "vxlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth12"
+ host_bind_override: "eth12"
+ type: "flat"
+ net_name: "flat"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-vlan"
+ container_type: "veth"
+ container_interface: "eth11"
+ type: "vlan"
+ range: "1:1"
+ net_name: "vlan"
+ group_binds:
+ - neutron_linuxbridge_agent
+ - network:
+ container_bridge: "br-storage"
+ container_type: "veth"
+ container_interface: "eth2"
+ ip_from_q: "storage"
+ type: "raw"
+ group_binds:
+ - glance_api
+ - cinder_api
+ - cinder_volume
+ - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+# log1:
+# ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ limit_container_types: glance
+ glance_nfs_client:
+ - server: "172.29.244.12"
+ remote_path: "/images"
+ local_path: "/var/lib/glance/images"
+ type: "nfs"
+ options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+ controller00:
+ ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+ compute00:
+ ip: 172.29.236.12
+ compute01:
+ ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+ controller00:
+ ip: 172.29.236.11
+ container_vars:
+ cinder_backends:
+ limit_container_types: cinder_volume
+ nfs_volume:
+ volume_backend_name: NFS_VOLUME1
+ volume_driver: cinder.volume.drivers.nfs.NfsDriver
+ nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+ nfs_shares_config: /etc/cinder/nfs_shares
+ shares:
+ - ip: "172.29.244.12"
+ share: "/volumes"
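
The storage_hosts entry above configures a single NFS-backed cinder-volume backend whose share list is written to /etc/cinder/nfs_shares on the cinder-volume host. Below is a minimal sketch (not part of XCI) of what that shares file ends up containing; the host:/export line format is the standard one consumed by cinder.volume.drivers.nfs.NfsDriver, and the path used for loading the config is an assumption.

    # Derive the /etc/cinder/nfs_shares content from openstack_user_config.yml.
    import yaml  # pip install pyyaml

    with open("xci/installer/osa/files/noha/openstack_user_config.yml") as f:
        config = yaml.safe_load(f)

    backends = config["storage_hosts"]["controller00"]["container_vars"]["cinder_backends"]
    shares = backends["nfs_volume"]["shares"]
    print("\n".join(f"{s['ip']}:{s['share']}" for s in shares))
    # -> 172.29.244.12:/volumes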
diff --git a/xci/installer/osa/files/noha/user_ceph.yml b/xci/installer/osa/files/noha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+ "RBD":
+ volume_driver: cinder.volume.drivers.rbd.RBDDriver
+ rbd_pool: volumes
+ rbd_ceph_conf: /etc/ceph/ceph.conf
+ rbd_store_chunk_size: 8
+ volume_backend_name: rbddriver
+ rbd_user: cinder
+ rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+ report_discard_supported: true
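
The ceph_mons expression above is plain Jinja, so its effect is easy to see outside Ansible. The sketch below (not part of XCI) renders it with illustrative stand-ins for the inventory data Ansible would supply; the group name and hostvars values are assumptions, and only the resulting list shape matters.

    # Render the ceph_mons template with mocked inventory data.
    from jinja2 import Template  # pip install jinja2

    ceph_mons_tmpl = (
        "[ {% for host in groups[mon_group_name] %}"
        "'{{ hostvars[host]['ansible_host'] }}'"
        "{% if not loop.last %},{% endif %}{% endfor %} ]"
    )

    rendered = Template(ceph_mons_tmpl).render(
        mon_group_name="ceph-mon",
        groups={"ceph-mon": ["controller00"]},
        hostvars={"controller00": {"ansible_host": "172.29.236.11"}},
    )
    print(rendered)  # [ '172.29.236.11' ]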
diff --git a/xci/installer/osa/files/noha/user_variables.yml b/xci/installer/osa/files/noha/user_variables.yml
new file mode 100644
index 00000000..66573428
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio to ensure we can bump OSA SHA with current
+# RAM allocation. Higher values will be tested once the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+ - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
diff --git a/xci/installer/osa/files/noha/user_variables_ceph.yml b/xci/installer/osa/files/noha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/installer/osa/files/noha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
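
The devices list above points ceph-ansible at three loop devices on each OSD host. Below is a minimal sketch (not part of XCI) of how such devices could be prepared before the playbooks run; the backing file path and size are assumptions, and XCI may provision them differently.

    # Create sparse backing files and attach them to /dev/loop0..2.
    import subprocess

    for i in range(3):
        backing = f"/var/lib/xci-ceph-osd{i}.img"
        subprocess.run(["truncate", "-s", "10G", backing], check=True)
        subprocess.run(["losetup", f"/dev/loop{i}", backing], check=True)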