Diffstat (limited to 'xci/nfvi/osa')
26 files changed, 1401 insertions, 10 deletions
diff --git a/xci/nfvi/osa/files/aio/flavor-vars.yml b/xci/nfvi/osa/files/aio/flavor-vars.yml
new file mode 100644
index 00000000..6ac1e0fe
--- /dev/null
+++ b/xci/nfvi/osa/files/aio/flavor-vars.yml
@@ -0,0 +1,3 @@
+---
+# this file is added intentionally in order to simplify putting files in place
+# in future, it might contain vars specific to this flavor
diff --git a/xci/nfvi/osa/files/aio/inventory b/xci/nfvi/osa/files/aio/inventory
new file mode 100644
index 00000000..9a3dd9ee
--- /dev/null
+++ b/xci/nfvi/osa/files/aio/inventory
@@ -0,0 +1,2 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
diff --git a/xci/nfvi/osa/files/ha/ceph.yml b/xci/nfvi/osa/files/ha/ceph.yml
new file mode 100644
index 00000000..1567c492
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/ceph.yml
@@ -0,0 +1,15 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
diff --git a/xci/nfvi/osa/files/ha/flavor-vars.yml b/xci/nfvi/osa/files/ha/flavor-vars.yml
new file mode 100644
index 00000000..167502c9
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/flavor-vars.yml
@@ -0,0 +1,39 @@
+---
+host_info: {
+    'opnfv': {
+        'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'controller01': {
+        'VLAN_IP': '192.168.122.4',
+        'MGMT_IP': '172.29.236.12',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'controller02': {
+        'VLAN_IP': '192.168.122.5',
+        'MGMT_IP': '172.29.236.13',
+        'VXLAN_IP': '172.29.240.13',
+        'STORAGE_IP': '172.29.244.13'
+    },
+    'compute00': {
+        'VLAN_IP': '192.168.122.6',
+        'MGMT_IP': '172.29.236.14',
+        'VXLAN_IP': '172.29.240.14',
+        'STORAGE_IP': '172.29.244.14'
+    },
+    'compute01': {
+        'VLAN_IP': '192.168.122.7',
+        'MGMT_IP': '172.29.236.15',
+        'VXLAN_IP': '172.29.240.15',
+        'STORAGE_IP': '172.29.244.15'
+    }
+}
diff --git a/xci/nfvi/osa/files/ha/inventory b/xci/nfvi/osa/files/ha/inventory
new file mode 100644
index 00000000..94b1d074
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/inventory
@@ -0,0 +1,11 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+controller01 ansible_ssh_host=192.168.122.4
+controller02 ansible_ssh_host=192.168.122.5
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.6
+compute01 ansible_ssh_host=192.168.122.7
diff --git a/xci/nfvi/osa/files/ha/openstack_user_config.yml b/xci/nfvi/osa/files/ha/openstack_user_config.yml
new file mode 100644
index 00000000..360aa5cb
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/openstack_user_config.yml
@@ -0,0 +1,255 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+  - "172.29.236.222"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.222
+  external_lb_vip_address: 192.168.122.220
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# rsyslog server
+# log_hosts:
+#   log1:
+#     ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.14"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.14"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.14"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+  controller01:
+    ip: 172.29.236.12
+  controller02:
+    ip: 172.29.236.13
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.14
+  compute01:
+    ip: 172.29.236.15
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
+  controller01:
+    ip: 172.29.236.12
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
+  controller02:
+    ip: 172.29.236.13
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
diff --git a/xci/nfvi/osa/files/ha/user_ceph.yml b/xci/nfvi/osa/files/ha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+  "RBD":
+    volume_driver: cinder.volume.drivers.rbd.RBDDriver
+    rbd_pool: volumes
+    rbd_ceph_conf: /etc/ceph/ceph.conf
+    rbd_store_chunk_size: 8
+    volume_backend_name: rbddriver
+    rbd_user: cinder
+    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+    report_discard_supported: true
diff --git a/xci/nfvi/osa/files/ha/user_variables.yml b/xci/nfvi/osa/files/ha/user_variables.yml
new file mode 100644
index 00000000..72960a01
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.220/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.222/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio to ensure we can bump OSA SHA with current
+# RAM allocation. Higher values will be tested once the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+  - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.220:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
diff --git a/xci/nfvi/osa/files/ha/user_variables_ceph.yml b/xci/nfvi/osa/files/ha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/nfvi/osa/files/ha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
diff --git a/xci/nfvi/osa/files/mini/ceph.yml b/xci/nfvi/osa/files/mini/ceph.yml
new file mode 100644
index 00000000..5c09b471
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/ceph.yml
@@ -0,0 +1,9 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+  compute00:
+    ip: 172.29.236.12
diff --git a/xci/nfvi/osa/files/mini/flavor-vars.yml b/xci/nfvi/osa/files/mini/flavor-vars.yml
new file mode 100644
index 00000000..0d446ba2
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/flavor-vars.yml
@@ -0,0 +1,21 @@
+---
+host_info: {
+    'opnfv': {
+        'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'compute00': {
+        'VLAN_IP': '192.168.122.4',
+        'MGMT_IP': '172.29.236.12',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+}
diff --git a/xci/nfvi/osa/files/mini/inventory b/xci/nfvi/osa/files/mini/inventory
new file mode 100644
index 00000000..eb73e5e3
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/inventory
@@ -0,0 +1,8 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
diff --git a/xci/nfvi/osa/files/mini/openstack_user_config.yml b/xci/nfvi/osa/files/mini/openstack_user_config.yml
new file mode 100644
index 00000000..f9ccee24
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/openstack_user_config.yml
@@ -0,0 +1,170 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+#   log1:
+#     ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.12"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
diff --git a/xci/nfvi/osa/files/mini/user_ceph.yml b/xci/nfvi/osa/files/mini/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+  "RBD":
+    volume_driver: cinder.volume.drivers.rbd.RBDDriver
+    rbd_pool: volumes
+    rbd_ceph_conf: /etc/ceph/ceph.conf
+    rbd_store_chunk_size: 8
+    volume_backend_name: rbddriver
+    rbd_user: cinder
+    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+    report_discard_supported: true
diff --git a/xci/nfvi/osa/files/mini/user_variables.yml b/xci/nfvi/osa/files/mini/user_variables.yml
new file mode 100644
index 00000000..9ec9e405
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio since this flavor is mostly
+# for short CI loops and users with lower requirements.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+  - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
diff --git a/xci/nfvi/osa/files/mini/user_variables_ceph.yml b/xci/nfvi/osa/files/mini/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/nfvi/osa/files/mini/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
diff --git a/xci/nfvi/osa/files/noha/ceph.yml b/xci/nfvi/osa/files/noha/ceph.yml
new file mode 100644
index 00000000..0deb522e
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/ceph.yml
@@ -0,0 +1,11 @@
+# The infra nodes where the Ceph mon services will run
+ceph-mon_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# The nodes that the Ceph OSD disks will be running on
+ceph-osd_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
diff --git a/xci/nfvi/osa/files/noha/flavor-vars.yml b/xci/nfvi/osa/files/noha/flavor-vars.yml
new file mode 100644
index 00000000..3c69a34b
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/flavor-vars.yml
@@ -0,0 +1,27 @@
+---
+host_info: {
+    'opnfv': {
+        'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
+        'STORAGE_IP': '172.29.244.10'
+    },
+    'controller00': {
+        'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
+        'STORAGE_IP': '172.29.244.11'
+    },
+    'compute00': {
+        'VLAN_IP': '192.168.122.4',
+        'MGMT_IP': '172.29.236.12',
+        'VXLAN_IP': '172.29.240.12',
+        'STORAGE_IP': '172.29.244.12'
+    },
+    'compute01': {
+        'VLAN_IP': '192.168.122.5',
+        'MGMT_IP': '172.29.236.13',
+        'VXLAN_IP': '172.29.240.13',
+        'STORAGE_IP': '172.29.244.13'
+    }
+}
diff --git a/xci/nfvi/osa/files/noha/inventory b/xci/nfvi/osa/files/noha/inventory
new file mode 100644
index 00000000..b4f9f6d0
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/inventory
@@ -0,0 +1,9 @@
+[opnfv]
+opnfv ansible_ssh_host=192.168.122.2
+
+[controller]
+controller00 ansible_ssh_host=192.168.122.3
+
+[compute]
+compute00 ansible_ssh_host=192.168.122.4
+compute01 ansible_ssh_host=192.168.122.5
diff --git a/xci/nfvi/osa/files/noha/openstack_user_config.yml b/xci/nfvi/osa/files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..fb12655e
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/openstack_user_config.yml
@@ -0,0 +1,172 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
"veth" + container_interface: "eth1" + ip_from_q: "container" + type: "raw" + group_binds: + - all_containers + - hosts + is_container_address: true + is_ssh_address: true + - network: + container_bridge: "br-vxlan" + container_type: "veth" + container_interface: "eth10" + ip_from_q: "tunnel" + type: "vxlan" + range: "1:1000" + net_name: "vxlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth12" + host_bind_override: "eth12" + type: "flat" + net_name: "flat" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-vlan" + container_type: "veth" + container_interface: "eth11" + type: "vlan" + range: "1:1" + net_name: "vlan" + group_binds: + - neutron_linuxbridge_agent + - network: + container_bridge: "br-storage" + container_type: "veth" + container_interface: "eth2" + ip_from_q: "storage" + type: "raw" + group_binds: + - glance_api + - cinder_api + - cinder_volume + - nova_compute + +# ## +# ## Infrastructure +# ## + +# galera, memcache, rabbitmq, utility +shared-infra_hosts: + controller00: + ip: 172.29.236.11 + +# repository (apt cache, python packages, etc) +repo-infra_hosts: + controller00: + ip: 172.29.236.11 + +# load balancer +# Ideally the load balancer should not use the Infrastructure hosts. +# Dedicated hardware is best for improved performance and security. +haproxy_hosts: + controller00: + ip: 172.29.236.11 + +# rsyslog server +# log_hosts: +# log1: +# ip: 172.29.236.14 + +# ## +# ## OpenStack +# ## + +# keystone +identity_hosts: + controller00: + ip: 172.29.236.11 + +# cinder api services +storage-infra_hosts: + controller00: + ip: 172.29.236.11 + +# glance +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. +image_hosts: + controller00: + ip: 172.29.236.11 + container_vars: + limit_container_types: glance + glance_nfs_client: + - server: "172.29.244.12" + remote_path: "/images" + local_path: "/var/lib/glance/images" + type: "nfs" + options: "_netdev,auto" + +# nova api, conductor, etc services +compute-infra_hosts: + controller00: + ip: 172.29.236.11 + +# heat +orchestration_hosts: + controller00: + ip: 172.29.236.11 + +# horizon +dashboard_hosts: + controller00: + ip: 172.29.236.11 + +# neutron server, agents (L3, etc) +network_hosts: + controller00: + ip: 172.29.236.11 + +# nova hypervisors +compute_hosts: + compute00: + ip: 172.29.236.12 + compute01: + ip: 172.29.236.13 + +# cinder volume hosts (NFS-backed) +# The settings here are repeated for each infra host. +# They could instead be applied as global settings in +# user_variables, but are left here to illustrate that +# each container could have different storage targets. 
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
diff --git a/xci/nfvi/osa/files/noha/user_ceph.yml b/xci/nfvi/osa/files/noha/user_ceph.yml
new file mode 100644
index 00000000..9d5f13a9
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/user_ceph.yml
@@ -0,0 +1,16 @@
+---
+# The OSA ceph_client role does not support loading IPs from an inventory group,
+# so we have to feed it a list of IPs
+# yamllint disable rule:line-length
+ceph_mons: "[ {% for host in groups[mon_group_name] %}'{{ hostvars[host]['ansible_host'] }}'{% if not loop.last %},{% endif %}{% endfor %} ]"
+# yamllint enable rule:line-length
+cinder_backends:
+  "RBD":
+    volume_driver: cinder.volume.drivers.rbd.RBDDriver
+    rbd_pool: volumes
+    rbd_ceph_conf: /etc/ceph/ceph.conf
+    rbd_store_chunk_size: 8
+    volume_backend_name: rbddriver
+    rbd_user: cinder
+    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
+    report_discard_supported: true
diff --git a/xci/nfvi/osa/files/noha/user_variables.yml b/xci/nfvi/osa/files/noha/user_variables.yml
new file mode 100644
index 00000000..66573428
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/user_variables.yml
@@ -0,0 +1,165 @@
+---
+# Copyright 2014, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ##
+# ## This file contains commonly used overrides for convenience. Please inspect
+# ## the defaults for each role to find additional override options.
+# ##
+
+# # Debug and Verbose options.
+debug: false
+
+# Allow root logins
+security_sshd_permit_root_login: yes
+
+haproxy_keepalived_external_vip_cidr: "192.168.122.3/32"
+haproxy_keepalived_internal_vip_cidr: "172.29.236.11/32"
+haproxy_keepalived_external_interface: br-vlan
+haproxy_keepalived_internal_interface: br-mgmt
+gnocchi_db_sync_options: ""
+
+# The settings below are taken from aio to ensure we can bump OSA SHA with current
+# RAM allocation. Higher values will be tested once the bump is done.
+# https://github.com/openstack/openstack-ansible/blob/master/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+
+## Galera settings
+galera_innodb_buffer_pool_size: 16M
+galera_innodb_log_buffer_size: 4M
+galera_wsrep_provider_options:
+  - { option: "gcache.size", value: "4M" }
+
+## Neutron settings
+neutron_metadata_checksum_fix: True
+
+### Set workers for all services to optimise memory usage
+
+## Repo
+repo_nginx_threads: 2
+
+## Keystone
+keystone_httpd_mpm_start_servers: 2
+keystone_httpd_mpm_min_spare_threads: 1
+keystone_httpd_mpm_max_spare_threads: 2
+keystone_httpd_mpm_thread_limit: 2
+keystone_httpd_mpm_thread_child: 1
+keystone_wsgi_threads: 1
+keystone_wsgi_processes_max: 2
+
+## Barbican
+barbican_wsgi_processes: 2
+barbican_wsgi_threads: 1
+
+## Cinder
+cinder_wsgi_processes_max: 2
+cinder_wsgi_threads: 1
+cinder_wsgi_buffer_size: 16384
+cinder_osapi_volume_workers_max: 2
+
+## Glance
+glance_api_threads_max: 2
+glance_api_threads: 1
+glance_api_workers: 1
+glance_registry_workers: 1
+
+## Nova
+nova_wsgi_threads: 1
+nova_wsgi_processes_max: 2
+nova_wsgi_processes: 2
+nova_wsgi_buffer_size: 16384
+nova_api_threads_max: 2
+nova_api_threads: 1
+nova_osapi_compute_workers: 1
+nova_conductor_workers: 1
+nova_metadata_workers: 1
+
+## Neutron
+neutron_rpc_workers: 1
+neutron_metadata_workers: 1
+neutron_api_workers: 1
+neutron_api_threads_max: 2
+neutron_api_threads: 2
+neutron_num_sync_threads: 1
+
+## Heat
+heat_api_workers: 1
+heat_api_threads_max: 2
+heat_api_threads: 1
+heat_wsgi_threads: 1
+heat_wsgi_processes_max: 2
+heat_wsgi_processes: 1
+heat_wsgi_buffer_size: 16384
+
+## Horizon
+horizon_wsgi_processes: 1
+horizon_wsgi_threads: 1
+horizon_wsgi_threads_max: 2
+
+## Ceilometer
+ceilometer_notification_workers_max: 2
+ceilometer_notification_workers: 1
+
+## AODH
+aodh_wsgi_threads: 1
+aodh_wsgi_processes_max: 2
+aodh_wsgi_processes: 1
+
+## Gnocchi
+gnocchi_wsgi_threads: 1
+gnocchi_wsgi_processes_max: 2
+gnocchi_wsgi_processes: 1
+
+## Swift
+swift_account_server_replicator_workers: 1
+swift_server_replicator_workers: 1
+swift_object_replicator_workers: 1
+swift_account_server_workers: 1
+swift_container_server_workers: 1
+swift_object_server_workers: 1
+swift_proxy_server_workers_max: 2
+swift_proxy_server_workers_not_capped: 1
+swift_proxy_server_workers_capped: 1
+swift_proxy_server_workers: 1
+
+## Ironic
+ironic_wsgi_threads: 1
+ironic_wsgi_processes_max: 2
+ironic_wsgi_processes: 1
+
+## Trove
+trove_api_workers_max: 2
+trove_api_workers: 1
+trove_conductor_workers_max: 2
+trove_conductor_workers: 1
+trove_wsgi_threads: 1
+trove_wsgi_processes_max: 2
+trove_wsgi_processes: 1
+
+## Sahara
+sahara_api_workers_max: 2
+sahara_api_workers: 1
+
+openrc_os_auth_url: "https://192.168.122.3:5000/v3"
+keystone_auth_admin_password: "opnfv-secret-password"
+openrc_os_password: "opnfv-secret-password"
+openrc_os_domain_name: "Default"
+openrc_cinder_endpoint_type: "publicURL"
+openrc_nova_endpoint_type: "publicURL"
+openrc_os_endpoint_type: "publicURL"
+openrc_clouds_yml_interface: "public"
+openrc_region_name: RegionOne
+haproxy_user_ssl_cert: "/etc/ssl/certs/xci.crt"
+haproxy_user_ssl_key: "/etc/ssl/private/xci.key"
+keystone_service_adminuri_insecure: true
+keystone_service_internaluri_insecure: true
diff --git a/xci/nfvi/osa/files/noha/user_variables_ceph.yml b/xci/nfvi/osa/files/noha/user_variables_ceph.yml
new file mode 100644
index 00000000..8f708990
--- /dev/null
+++ b/xci/nfvi/osa/files/noha/user_variables_ceph.yml
@@ -0,0 +1,32 @@
+---
+# Copyright 2017, Logan Vig <logan2211@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+## ceph-ansible settings
+devices: [/dev/loop0, /dev/loop1, /dev/loop2]
+common_single_host_mode: true
+monitor_interface: eth1
+public_network: "172.29.236.0/22"
+cluster_network: "172.29.244.0/22"
+journal_size: 100
+journal_collocation: true
+pool_default_pg_num: 32
+openstack_config: true # Ceph ansible automatically creates pools & keys
+cinder_ceph_client: cinder
+cinder_default_volume_type: RBD
+glance_ceph_client: glance
+glance_default_store: rbd
+glance_rbd_store_pool: images
+nova_libvirt_images_rbd_pool: vms
+nfs_file_gw: False
diff --git a/xci/nfvi/osa/nfvi-deploy.sh b/xci/nfvi/osa/nfvi-deploy.sh
index c4419de6..14577dd5 100755
--- a/xci/nfvi/osa/nfvi-deploy.sh
+++ b/xci/nfvi/osa/nfvi-deploy.sh
@@ -180,7 +180,7 @@ echo "Info: OpenStack installation is successfully completed!"
 #-------------------------------------------------------------------------------
 echo "Info: Openstack login details"
 echo "-----------------------------------------------------------------------"
-OS_USER_CONFIG=$XCI_PATH/xci/file/$XCI_FLAVOR/openstack_user_config.yml
+OS_USER_CONFIG=$XCI_FLAVOR_ANSIBLE_FILE_PATH/openstack_user_config.yml
 python -c \
 "import yaml
 if '$XCI_FLAVOR' is 'aio':
diff --git a/xci/nfvi/osa/playbooks/configure-opnfvhost.yml b/xci/nfvi/osa/playbooks/configure-opnfvhost.yml
index 656f18e8..7f0e43f9 100644
--- a/xci/nfvi/osa/playbooks/configure-opnfvhost.yml
+++ b/xci/nfvi/osa/playbooks/configure-opnfvhost.yml
@@ -18,11 +18,11 @@
         file: "{{ item }}"
       with_items:
         - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-        - "{{ XCI_PATH }}/xci/file/{{ XCI_FLAVOR }}/flavor-vars.yml"
+        - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
     - name: Set facts for remote deployment
       set_fact:
         remote_xci_path: "{{ ansible_env.HOME }}/releng-xci"
-        remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/file/{{ XCI_FLAVOR }}"
+        remote_xci_flavor_files: "{{ ansible_env.HOME }}/releng-xci/xci/nfvi/{{XCI_NFVI}}/files/{{ XCI_FLAVOR }}"
         remote_xci_playbooks: "{{ ansible_env.HOME }}/releng-xci/xci/playbooks"
   roles:
@@ -37,7 +37,7 @@
     - name: fetch public key
       fetch:
         src: "{{ ansible_env.HOME }}/.ssh/id_rsa.pub"
-        dest: "{{ XCI_PATH }}/xci/file/authorized_keys"
+        dest: "{{ XCI_PATH }}/xci/files/authorized_keys"
         flat: yes
     - name: Copy releng-xci to remote host
       synchronize:
@@ -152,7 +152,7 @@
   tasks:
     - name: Append public keys to authorized_keys
-      shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/file/authorized_keys"
+      shell: "/bin/cat {{ ansible_env.HOME }}/.ssh/id_rsa.pub >> {{ XCI_PATH }}/xci/files/authorized_keys"

 - hosts: opnfv
   remote_user: root
@@ -166,8 +166,8 @@
       failed_when: false
       with_items:
         - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-        - "{{ XCI_PATH }}/xci/file/{{ XCI_FLAVOR }}/flavor-vars.yml"
-        - "{{ XCI_PATH }}/xci/file/{{ XCI_FLAVOR }}/user_variables.yml"
+        - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
+        - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/user_variables.yml"
   roles:
     - role: "openstack-ansible-openstack_openrc"
diff --git a/xci/nfvi/osa/playbooks/configure-targethosts.yml b/xci/nfvi/osa/playbooks/configure-targethosts.yml
index 14a9149b..fb43a920 100644
--- a/xci/nfvi/osa/playbooks/configure-targethosts.yml
+++ b/xci/nfvi/osa/playbooks/configure-targethosts.yml
@@ -4,7 +4,7 @@
   tasks:
     - name: add public key to host
       copy:
-        src: "{{ XCI_PATH }}/xci/file/authorized_keys"
+        src: "{{ XCI_PATH }}/xci/files/authorized_keys"
         dest: /root/.ssh/authorized_keys

 - hosts: controller
@@ -18,7 +18,7 @@
         file: "{{ item }}"
       with_items:
         - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-        - "{{ XCI_PATH }}/xci/file/{{ XCI_FLAVOR }}/flavor-vars.yml"
+        - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
   roles:
     - role: configure-network
     # we need to force sync time with ntp or the nodes will be out of sync timewise
@@ -35,7 +35,7 @@
         file: "{{ item }}"
       with_items:
         - "{{ XCI_PATH }}/xci/var/{{ ansible_os_family }}.yml"
-        - "{{ XCI_PATH }}/xci/file/{{ XCI_FLAVOR }}/flavor-vars.yml"
+        - "{{ XCI_FLAVOR_ANSIBLE_FILE_PATH }}/flavor-vars.yml"
   roles:
     - role: configure-network
     # we need to force sync time with ntp or the nodes will be out of sync timewise
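
Note: the ceph_mons override in the user_ceph.yml files above builds a bracketed list of monitor IPs with an inline Jinja2 loop, since the OSA ceph_client role cannot read an inventory group directly. A minimal sketch of how that expression renders, using the jinja2 library outside Ansible; the group name "mons" and the inventory data below are made up for illustration and are not part of the commit:

# Illustrative only: render the ceph_mons expression from user_ceph.yml
# with hand-built stand-ins for Ansible's groups/hostvars structures.
from jinja2 import Template

ceph_mons_expr = (
    "[ {% for host in groups[mon_group_name] %}"
    "'{{ hostvars[host]['ansible_host'] }}'"
    "{% if not loop.last %},{% endif %}"
    "{% endfor %} ]"
)

rendered = Template(ceph_mons_expr).render(
    mon_group_name="mons",  # hypothetical group name
    groups={"mons": ["controller00", "controller01", "controller02"]},
    hostvars={
        "controller00": {"ansible_host": "172.29.236.11"},
        "controller01": {"ansible_host": "172.29.236.12"},
        "controller02": {"ansible_host": "172.29.236.13"},
    },
)

print(rendered)  # [ '172.29.236.11','172.29.236.12','172.29.236.13' ]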