---
cidr_networks:
  container: {{ mgmt_cidr }}
  tunnel: {{ tenant_cidr }}
  storage: {{ storage_cidr }}

used_ips:
{% for item in network_cfg["ip_settings"] %}
  - "{{ ','.join(item["ip_ranges"][0]) }}"
{% if item["name"] == "mgmt" %}
  - "{{ ','.join(item["dhcp_ranges"][0]) }}"
{% endif %}
{% if "gw" in item %}
  - "{{ item["gw"] }}"
{% endif %}
{% endfor %}
  - "{{ internal_vip.ip }}"
  - "{{ public_vip.ip }}"

{% if "linuxbridge" == NEUTRON_MECHANISM_DRIVERS[0] %}
{% set neutron_agent = "neutron_linuxbridge_agent" %}
{% else %}
{% set neutron_agent = "neutron_openvswitch_agent" %}
{% endif %}
{% set provider_net_mappings = network_cfg["provider_net_mappings"] %}
{% set public_net_info = network_cfg["public_net_info"] %}
{% set ext_physnet = public_net_info["provider_network"] %}
{% set ext_type = public_net_info["type"] %}
{% set ext_intf = [] %}
{% for item in provider_net_mappings %}
{% if item["network"] == ext_physnet %}
{% set _ = ext_intf.append(item["interface"]) %}
{% endif %}
{% endfor %}

global_overrides:
  internal_lb_vip_address: {{ internal_vip.ip }}
  external_lb_vip_address: {{ public_vip.ip }}
  tunnel_bridge: "br-tenant"
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
        is_ssh_address: true
{% if tenant_net_info["type"] == "vxlan" %}
    - network:
        container_bridge: "br-tenant"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "{{ tenant_net_info["range"] }}"
        net_name: "vxlan"
        group_binds:
          - {{ neutron_agent }}
{% endif %}
    - network:
        container_bridge: "br-external"
        container_type: "veth"
        container_interface: "{{ ext_intf[0] }}"
        host_bind_override: "{{ ext_intf[0] }}"
        type: "{{ ext_type }}"
        net_name: "{{ ext_physnet }}"
        group_binds:
          - {{ neutron_agent }}
{% for item in provider_net_mappings %}
{% if item["network"] != ext_physnet and "controller" in item["role"] %}
    - network:
        container_bridge: "br-tenant"
        container_type: "veth"
        container_interface: "{{ item["interface"] }}"
        host_bind_override: "{{ item["interface"] }}"
        type: "flat"
        net_name: "{{ item["network"] }}"
        group_binds:
          - {{ neutron_agent }}
{% endif %}
{% endfor %}
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# repository (apt cache, python packages, etc)
repo-infra_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# rsyslog server
#log_hosts:
#  log1:
#    ip: 10.1.0.53

###
### OpenStack
###

# keystone
identity_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# cinder api services
storage-infra_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
    container_vars:
      limit_container_types: glance
      glance_nfs_client:
        - server: "{{ ip_settings[groups.compute[0]]['storage']['ip'] }}"
          remote_path: "/images"
          local_path: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
{% endfor %}

# nova api, conductor, etc services
compute-infra_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# heat
orchestration_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# horizon
dashboard_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# neutron server, agents (L3, etc)
network_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# ceilometer (telemetry API)
metering-infra_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# aodh (telemetry alarm service)
metering-alarm_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# gnocchi (telemetry metrics storage)
metrics_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# tacker (mano service)
mano_hosts:
{% for host in groups.controller %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# nova hypervisors
compute_hosts:
{% for host in groups.compute %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# ceilometer compute agent (telemetry)
metering-compute_hosts:
{% for host in groups.compute %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
{% endfor %}

# cinder volume hosts (LVM/iSCSI-backed)
# The settings here are repeated for each storage host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
{% for host in groups.compute %}
  {{ host }}:
    ip: {{ hostvars[host]['ansible_ssh_host'] }}
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        lvm:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
          volume_backend_name: LVM_iSCSI
          iscsi_ip_address: "{{ ip_settings[host]['storage']['ip'] }}"
{% endfor %}
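
# For reference, a rendered storage_hosts entry would look roughly like the
# commented stanza below. This is an illustrative sketch only: "compute01",
# its ansible_ssh_host address and the storage-network address are
# hypothetical values; the real names and IPs come from the inventory and
# ip_settings at template-render time.
#
#storage_hosts:
#  compute01:
#    ip: 10.1.0.51
#    container_vars:
#      cinder_backends:
#        limit_container_types: cinder_volume
#        lvm:
#          volume_group: cinder-volumes
#          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
#          volume_backend_name: LVM_iSCSI
#          iscsi_ip_address: "172.16.2.51"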