##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
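# Reclass model for the three-node HA OpenStack control plane: Keystone,
# Glance, Nova, Cinder, Heat, Designate (bind backend) and Barbican, with
# GlusterFS-backed shares for the Glance and Keystone services.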
classes:
  - system.linux.system.repo.mcp.mirror.v1.openstack
  - system.ceilometer.client
  - system.memcached.server.single
  - system.keystone.server.cluster
  - system.keystone.server.wsgi
  - system.glance.control.cluster
  - system.nova.control.cluster
  - system.cinder.control.cluster
  - system.cinder.control.backend.lvm
  - system.heat.server.cluster
  - system.designate.server.cluster
  - system.designate.server.backend.bind
  - system.barbican.server.cluster
  - system.apache.server.site.barbican
  - service.barbican.server.plugin.simple_crypto
  - system.apache.server.single
  - system.bind.server.single
  - system.haproxy.proxy.listen.openstack.placement
  - system.glusterfs.client.cluster
  - system.glusterfs.client.volume.glance
  - system.glusterfs.client.volume.keystone
{%- if not conf.MCP_VCP %}
  # sync from kvm
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  # NOTE(armband): Disabled for novcp
  # - system.salt.control.virt
  # - system.salt.control.cluster.openstack_control_cluster
  # - system.salt.control.cluster.openstack_proxy_cluster
  # - system.salt.control.cluster.openstack_database_cluster
  # - system.salt.control.cluster.openstack_message_queue_cluster
  # - system.salt.control.cluster.openstack_telemetry_cluster
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.mcp-common-ha.glusterfs_repo
  - cluster.mcp-common-ha.infra.kvm_pdf
  - cluster.all-mcp-arch-common.opnfv.maas_proxy
  - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
{%- endif %}
parameters:
  _param:
{%- if not conf.MCP_VCP %}
    linux_system_codename: xenial  # sync from kvm
    # For NOVCP, we switch keepalived VIPs to keep cluster_vip_address on ctl
    single_nic: br-ctl  # for keepalived_vip_interface interpolation
    control_nic: ~      # Dummy value to keep reclass 1.5.2 happy
    keepalived_openstack_web_public_vip_address: ${_param:openstack_proxy_address}
    keepalived_openstack_web_public_vip_interface: br-ex
{%- endif %}
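    # Keepalived VIP settings and membership of the three-node control cluster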
    keepalived_vip_interface: ${_param:single_nic}
    keepalived_vip_virtual_router_id: 50
    cluster_vip_address: ${_param:openstack_control_address}
    cluster_local_address: ${_param:single_address}
    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
    cluster_node01_address: ${_param:openstack_control_node01_address}
    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
    cluster_node02_address: ${_param:openstack_control_node02_address}
    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
    cluster_node03_address: ${_param:openstack_control_node03_address}
    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
    barbican_integration_enabled: 'false'
    fernet_rotation_driver: 'shared_filesystem'
    credential_rotation_driver: 'shared_filesystem'
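  # The &db_conn_recycle_time anchor defined under nova:controller is merged
  # (<<:) into the cinder, neutron, keystone and glance pillars below (and into
  # heat when MCP_VCP is set), so they all share the same database
  # connection_recycle_time.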
  nova:
    controller: &db_conn_recycle_time
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
      barbican:
        enabled: ${_param:barbican_integration_enabled}
  cinder:
    controller:
      <<: *db_conn_recycle_time
  neutron:
    server:
      <<: *db_conn_recycle_time
      vlan_aware_vms: true
      root_helper_daemon: false
  keystone:
    server:
      <<: *db_conn_recycle_time
      cacert: /etc/ssl/certs/mcp_os_cacert
      openrc_extra:
        volume_device_name: sdc
  glance:
    server:
      <<: *db_conn_recycle_time
      identity:
        barbican_endpoint: ${barbican:server:host_href}
{%- if conf.MCP_VCP %}
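  # With a virtualized control plane, the Heat metadata/waitcondition/watch
  # endpoints are served via the proxy nodes.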
  heat:
    server:
      <<: *db_conn_recycle_time
      metadata:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      waitcondition:
        host: ${_param:openstack_proxy_control_address}
        port: 8000
        protocol: http
      watch:
        host: ${_param:openstack_proxy_control_address}
        port: 8003
        protocol: http
{%- else %}
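  # Without a virtualized control plane, the control nodes double as the KVM /
  # GlusterFS hosts, so the relevant kvm role settings are kept in sync here.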
  libvirt:
    server:
      service: libvirtd
      config_sys: /etc/default/libvirtd
      unix_sock_group: libvirt
  linux:
    network:
      # Add public IPs here as overrides; no need to fork another kvm_pdf.j2
      interface:
        br-ex:
          address: ${_param:external_address}
          proto: static
  apache:
    server:
      bind:
        listen_default_ports: false
  # sync from common-ha kvm role
  glusterfs:
    server:
      service: glusterd
      volumes:
        nova_instances:
          storage: /srv/glusterfs/nova_instances
          replica: 3
          bricks:
            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
          options:
            cluster.readdir-optimize: 'True'
            nfs.disable: 'True'
            network.remote-dio: 'True'
            cluster.favorite-child-policy: mtime
            diagnostics.client-log-level: WARNING
            diagnostics.brick-log-level: WARNING
{%- endif %}
  haproxy:
    proxy:
      listen:
        heat_cloudwatch_api:
          enabled: false
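  # Barbican stores secrets with the simple_crypto plugin (see the
  # service.barbican.server.plugin.simple_crypto class above).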
  barbican:
    server:
      ks_notifications_enable: true
      store:
        software:
          crypto_plugin: simple_crypto
          store_plugin: store_crypto
          global_default: true
      database:
        connection_recycle_time: ${_param:db_connection_recycle_time}
        host: ${_param:openstack_database_address}
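  # Local bind instances expose an rndc control channel on port 953, restricted
  # to the three control nodes and keyed for Designate.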
  bind:
    server:
      control:
        mgmt:
          enabled: true
          bind:
            address: ${_param:single_address}
            port: 953
          allow:
            - ${_param:openstack_control_node01_address}
            - ${_param:openstack_control_node02_address}
            - ${_param:openstack_control_node03_address}
          keys:
            - designate
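  # Designate pool targets point at the bind servers on control node02/node03
  # and manage zones over rndc with the key referenced above.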
  designate:
    _support:
      sphinx:
        enabled: False  # Workaround for broken meta/sphinx.yml in salt-formula-designate
    server:
      pools:
        default:
          description: 'test pool'
          targets:
            default:
              description: 'test target1'
            default1:
              type: ${_param:designate_pool_target_type}
              description: 'test target2'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node02_address}
                port: 53
                rndc_host: ${_param:openstack_control_node02_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key
            default2:
              type: ${_param:designate_pool_target_type}
              description: 'test target3'
              masters: ${_param:designate_pool_target_masters}
              options:
                host: ${_param:openstack_control_node03_address}
                port: 53
                rndc_host: ${_param:openstack_control_node03_address}
                rndc_port: 953
                rndc_key_file: /etc/designate/rndc.key