From 72121b19a9c12961afdfd6ee7d4fe3eff95d8b10 Mon Sep 17 00:00:00 2001
From: Manuel Buil
Date: Tue, 3 Oct 2017 11:09:57 +0200
Subject: Add tacker to the scenario role

Change-Id: I0d6f48eff3edd7e1117ec0c5f7f16c1de35300d5
Signed-off-by: Manuel Buil
---
 .../tacker_files/noha/openstack_user_config.yml    | 177 +++++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml

diff --git a/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
new file mode 100644
index 00000000..ee8889d2
--- /dev/null
+++ b/scenarios/os-odl-sfc/role/os-odl-sfc/files/tacker_files/noha/openstack_user_config.yml
@@ -0,0 +1,177 @@
+---
+cidr_networks:
+  container: 172.29.236.0/22
+  tunnel: 172.29.240.0/22
+  storage: 172.29.244.0/22
+
+used_ips:
+  - "172.29.236.1,172.29.236.50"
+  - "172.29.240.1,172.29.240.50"
+  - "172.29.244.1,172.29.244.50"
+  - "172.29.248.1,172.29.248.50"
+
+global_overrides:
+  internal_lb_vip_address: 172.29.236.11
+  external_lb_vip_address: 192.168.122.3
+  tunnel_bridge: "br-vxlan"
+  management_bridge: "br-mgmt"
+  provider_networks:
+    - network:
+        container_bridge: "br-mgmt"
+        container_type: "veth"
+        container_interface: "eth1"
+        ip_from_q: "container"
+        type: "raw"
+        group_binds:
+          - all_containers
+          - hosts
+        is_container_address: true
+        is_ssh_address: true
+    - network:
+        container_bridge: "br-vxlan"
+        container_type: "veth"
+        container_interface: "eth10"
+        ip_from_q: "tunnel"
+        type: "vxlan"
+        range: "1:1000"
+        net_name: "vxlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth12"
+        host_bind_override: "eth12"
+        type: "flat"
+        net_name: "flat"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-vlan"
+        container_type: "veth"
+        container_interface: "eth11"
+        type: "vlan"
+        range: "1:1"
+        net_name: "vlan"
+        group_binds:
+          - neutron_linuxbridge_agent
+    - network:
+        container_bridge: "br-storage"
+        container_type: "veth"
+        container_interface: "eth2"
+        ip_from_q: "storage"
+        type: "raw"
+        group_binds:
+          - glance_api
+          - cinder_api
+          - cinder_volume
+          - nova_compute
+
+# ##
+# ## Infrastructure
+# ##
+
+# galera, memcache, rabbitmq, utility
+shared-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# repository (apt cache, python packages, etc)
+repo-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# load balancer
+# Ideally the load balancer should not use the Infrastructure hosts.
+# Dedicated hardware is best for improved performance and security.
+haproxy_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# rsyslog server
+# log_hosts:
+#   log1:
+#     ip: 172.29.236.14
+
+# ##
+# ## OpenStack
+# ##
+
+# keystone
+identity_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# cinder api services
+storage-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# glance
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+image_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      limit_container_types: glance
+      glance_nfs_client:
+        - server: "172.29.244.12"
+          remote_path: "/images"
+          local_path: "/var/lib/glance/images"
+          type: "nfs"
+          options: "_netdev,auto"
+
+# nova api, conductor, etc services
+compute-infra_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# heat
+orchestration_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# horizon
+dashboard_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# tacker
+mano_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# neutron server, agents (L3, etc)
+network_hosts:
+  controller00:
+    ip: 172.29.236.11
+
+# nova hypervisors
+compute_hosts:
+  compute00:
+    ip: 172.29.236.12
+  compute01:
+    ip: 172.29.236.13
+
+# cinder volume hosts (NFS-backed)
+# The settings here are repeated for each infra host.
+# They could instead be applied as global settings in
+# user_variables, but are left here to illustrate that
+# each container could have different storage targets.
+storage_hosts:
+  controller00:
+    ip: 172.29.236.11
+    container_vars:
+      cinder_backends:
+        limit_container_types: cinder_volume
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+            - share: "/volumes"
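
Note (not part of the commit): a minimal sanity-check sketch of the config this change adds. It assumes PyYAML is installed and that the file has been deployed to the usual OpenStack-Ansible location /etc/openstack_deploy/openstack_user_config.yml; both the path and the check itself are illustrative. It loads the file and confirms that the new mano_hosts group, which this scenario uses for tacker, targets the same controller as the existing control-plane groups.

    # Illustrative sketch only, not part of the patch. Assumes PyYAML is
    # installed and that the rendered config sits at the path below.
    import yaml

    CONFIG_PATH = "/etc/openstack_deploy/openstack_user_config.yml"  # assumed location

    with open(CONFIG_PATH) as handle:
        cfg = yaml.safe_load(handle)

    # The group added by this change: mano_hosts (tacker) should point at the
    # same controller as the rest of the control plane.
    mano = cfg.get("mano_hosts", {})
    shared = cfg["shared-infra_hosts"]

    assert "controller00" in mano, "mano_hosts does not define controller00"
    assert mano["controller00"]["ip"] == shared["controller00"]["ip"], (
        "mano_hosts should target the same controller as shared-infra_hosts"
    )
    print("mano_hosts (tacker) targets", mano["controller00"]["ip"])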