##############################################################################
# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
classes:
  - service.keepalived.cluster.single
  - system.glusterfs.server.volume.glance
  - system.glusterfs.server.volume.keystone
  - system.glusterfs.server.cluster
  - system.salt.control.virt
  - system.salt.control.cluster.openstack_control_cluster
  - system.salt.control.cluster.openstack_proxy_cluster
  - system.salt.control.cluster.openstack_database_cluster
  - system.salt.control.cluster.openstack_message_queue_cluster
  - system.salt.control.cluster.openstack_telemetry_cluster
  # - system.salt.control.cluster.stacklight_server_cluster
  # - system.salt.control.cluster.stacklight_log_cluster
  # - system.salt.control.cluster.stacklight_telemetry_cluster
  - cluster.baremetal-mcp-pike-common-ha.infra.kvm_pdf
parameters:
  _param:
    linux_system_codename: xenial
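    # Keepalived VIP shared by the three infra KVM nodes on br-ctl
    # (VRRP virtual router id 69), addresses taken from the infra_kvm params.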
    cluster_vip_address: ${_param:infra_kvm_address}
    cluster_node01_address: ${_param:infra_kvm_node01_address}
    cluster_node02_address: ${_param:infra_kvm_node02_address}
    cluster_node03_address: ${_param:infra_kvm_node03_address}
    keepalived_vip_interface: br-ctl
    keepalived_vip_virtual_router_id: 69
    # {dhcp,single}_nic are not used here, but they are still referenced
    # and must resolve to a value
    dhcp_nic: ${_param:opnfv_vcp_vm_primary_interface}
    single_nic: ${_param:opnfv_vcp_vm_secondary_interface}
  linux:
    network:
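      # Drop the interface file generated by cloud-init (assumption: it would
      # otherwise conflict with the salt-managed network configuration)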
      remove_iface_files:
        - '/etc/network/interfaces.d/50-cloud-init.cfg'
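  # libvirt formula overrides: systemd service name and environment file
  # (paths assume the Ubuntu libvirt packaging used by this deployment)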
  libvirt:
    server:
      service: libvirtd
      config_sys: /etc/default/libvirtd
  salt:
    control:
      size:  # typical RAM sizes (MiB): 4096, 8192, 16384, 32768, 65536
        # Default production sizing
        openstack.control:
          cpu: 4
          ram: 12288
          disk_profile: small
          net_profile: default
        openstack.database:
          cpu: 4
          ram: 6144
          disk_profile: large
          net_profile: default
        openstack.message_queue:
          cpu: 4
          ram: 2048
          disk_profile: small
          net_profile: default
        openstack.telemetry:
          cpu: 2
          ram: 3072
          disk_profile: xxlarge
          net_profile: default
        # stacklight.log:
        #   cpu: 2
        #   ram: 4096
        #   disk_profile: xxlarge
        #   net_profile: default
        # stacklight.server:
        #   cpu: 2
        #   ram: 4096
        #   disk_profile: small
        #   net_profile: default
        # stacklight.telemetry:
        #   cpu: 2
        #   ram: 4096
        #   disk_profile: xxlarge
        #   net_profile: default
        openstack.proxy:
          cpu: 2
          ram: 2048
          disk_profile: small
          net_profile: default_ext
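      # The size profiles above are consumed by the openstack_*_cluster
      # system classes listed at the top of this file; net_profile values
      # map to the virt:nic profiles defined below.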
      cluster:
        internal:
          node:
            mdb01:
              image: ${_param:salt_control_xenial_image}
            mdb02:
              image: ${_param:salt_control_xenial_image}
            mdb03:
              image: ${_param:salt_control_xenial_image}
            ctl01:
              image: ${_param:salt_control_xenial_image}
            ctl02:
              image: ${_param:salt_control_xenial_image}
            ctl03:
              image: ${_param:salt_control_xenial_image}
            dbs01:
              image: ${_param:salt_control_xenial_image}
            dbs02:
              image: ${_param:salt_control_xenial_image}
            dbs03:
              image: ${_param:salt_control_xenial_image}
            msg01:
              image: ${_param:salt_control_xenial_image}
            msg02:
              image: ${_param:salt_control_xenial_image}
            msg03:
              image: ${_param:salt_control_xenial_image}
            prx01:
              image: ${_param:salt_control_xenial_image}
            prx02:
              image: ${_param:salt_control_xenial_image}
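              # Pin prx02 to the third KVM host; the other VMs keep the
              # provider assigned by the inherited cluster classes.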
              provider: kvm03.${_param:cluster_domain}
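  # NIC profiles referenced by net_profile in the size definitions above:
  # 'default' attaches VMs to the control (br-ctl) and management (br-mgmt)
  # bridges, 'default_ext' additionally attaches the external bridge br-ex.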
  virt:
    nic:
      default:
        eth1:
          bridge: br-mgmt
          model: virtio
        eth0:
          bridge: br-ctl
          model: virtio
      default_ext:
        eth2:
          bridge: br-mgmt
          model: virtio
        eth1:
          bridge: br-ex
          model: virtio
        eth0:
          bridge: br-ctl
          model: virtio
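  # Besides the glance and keystone volumes pulled in by the system classes,
  # define a 3-way replicated GlusterFS volume for nova instances with one
  # brick on each KVM node.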
  glusterfs:
    server:
      service: glusterd
      volumes:
        nova_instances:
          storage: /srv/glusterfs/nova_instances
          replica: 3
          bricks:
            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
          options:
            cluster.readdir-optimize: 'True'
            nfs.disable: 'True'
            network.remote-dio: 'True'
            cluster.favorite-child-policy: mtime
            diagnostics.client-log-level: WARNING
            diagnostics.brick-log-level: WARNING