From 6acecf3b104a072c60d071364344b9ff04994168 Mon Sep 17 00:00:00 2001
From: Alexandru Avadanii
Date: Thu, 1 Feb 2018 00:28:17 +0100
Subject: [baremetal] Rename all to drop baremetal prefix

A few things differ between baremetal and virtual nodes:
- provisioning method;
- network setup.

Now that we support fully dynamic network configuration based on
PDF + IDF, dynamic provisioning of VMs on the jumpserver (as virtual
cluster nodes) and MaaS-driven baremetal provisioning, let's drop the
'baremetal-' prefix from cluster model names and prepare for unified
scenarios.

Note that some limitations still apply, e.g. virtual nodes are
spawned only on the jumpserver (localhost) for now.

JIRA: FUEL-310

Change-Id: If20077ac37c6f15961468abc58db7e16f2c29260
Signed-off-by: Alexandru Avadanii
---
 .../cluster/mcp-pike-common-ha/infra/kvm.yml | 162 +++++++++++++++++++++
 1 file changed, 162 insertions(+)
 create mode 100644 mcp/reclass/classes/cluster/mcp-pike-common-ha/infra/kvm.yml
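A note on how the new class below gets its values: reclass
interpolates ${_param:...} references from the merged _param space of
all included classes, so the infra_kvm_* addresses used for the
keepalived VIP and the GlusterFS bricks are expected to come from
cluster.mcp-pike-common-ha.infra.kvm_pdf, the class generated from the
PDF/IDF. A minimal sketch of that resolution, with purely illustrative
addresses (the real kvm_pdf values are per-deployment):

    # Hypothetical excerpt of what infra/kvm_pdf.yml might provide:
    parameters:
      _param:
        infra_kvm_address: 10.167.4.240
        infra_kvm_node01_address: 10.167.4.241
    # With those in scope, this file's
    #   cluster_vip_address: ${_param:infra_kvm_address}
    # resolves to 10.167.4.240, i.e. the keepalived VIP on br-ctl.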
diff --git a/mcp/reclass/classes/cluster/mcp-pike-common-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/mcp-pike-common-ha/infra/kvm.yml
new file mode 100644
index 000000000..a933adf48
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-pike-common-ha/infra/kvm.yml
@@ -0,0 +1,162 @@
+##############################################################################
+# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+  - system.linux.system.repo.glusterfs
+  - service.keepalived.cluster.single
+  - system.glusterfs.server.volume.glance
+  - system.glusterfs.server.volume.keystone
+  - system.glusterfs.server.cluster
+  - system.salt.control.virt
+  - system.salt.control.cluster.openstack_control_cluster
+  - system.salt.control.cluster.openstack_proxy_cluster
+  - system.salt.control.cluster.openstack_database_cluster
+  - system.salt.control.cluster.openstack_message_queue_cluster
+  - system.salt.control.cluster.openstack_telemetry_cluster
+  # - system.salt.control.cluster.stacklight_server_cluster
+  # - system.salt.control.cluster.stacklight_log_cluster
+  # - system.salt.control.cluster.stacklight_telemetry_cluster
+  - cluster.mcp-pike-common-ha.infra.kvm_pdf
+  - cluster.mcp-pike-common-ha.include.proxy
+parameters:
+  _param:
+    linux_system_codename: xenial
+    glusterfs_version: '3.13'
+    cluster_vip_address: ${_param:infra_kvm_address}
+    cluster_node01_address: ${_param:infra_kvm_node01_address}
+    cluster_node02_address: ${_param:infra_kvm_node02_address}
+    cluster_node03_address: ${_param:infra_kvm_node03_address}
+    keepalived_vip_interface: br-ctl
+    keepalived_vip_virtual_router_id: 69
+  linux:
+    network:
+      remove_iface_files:
+        - '/etc/network/interfaces.d/50-cloud-init.cfg'
+    system:
+      kernel:
+        boot_options:
+          - spectre_v2=off
+          - nopti
+  libvirt:
+    server:
+      service: libvirtd
+      config_sys: /etc/default/libvirtd
+      unix_sock_group: libvirt
+  salt:
+    control:
+      size:  # RAM 4096,8192,16384,32768,65536
+        # Default production sizing
+        openstack.control:
+          cpu: 4
+          ram: 12288
+          disk_profile: small
+          net_profile: default
+        openstack.database:
+          cpu: 4
+          ram: 6144
+          disk_profile: large
+          net_profile: default
+        openstack.message_queue:
+          cpu: 4
+          ram: 2048
+          disk_profile: small
+          net_profile: default
+        openstack.telemetry:
+          cpu: 2
+          ram: 3072
+          disk_profile: xxlarge
+          net_profile: default
+        # stacklight.log:
+        #   cpu: 2
+        #   ram: 4096
+        #   disk_profile: xxlarge
+        #   net_profile: default
+        # stacklight.server:
+        #   cpu: 2
+        #   ram: 4096
+        #   disk_profile: small
+        #   net_profile: default
+        # stacklight.telemetry:
+        #   cpu: 2
+        #   ram: 4096
+        #   disk_profile: xxlarge
+        #   net_profile: default
+        openstack.proxy:
+          cpu: 2
+          ram: 2048
+          disk_profile: small
+          net_profile: default_ext
+      cluster:
+        internal:
+          node:
+            mdb01:
+              image: ${_param:salt_control_xenial_image}
+            mdb02:
+              image: ${_param:salt_control_xenial_image}
+            mdb03:
+              image: ${_param:salt_control_xenial_image}
+            ctl01:
+              image: ${_param:salt_control_xenial_image}
+            ctl02:
+              image: ${_param:salt_control_xenial_image}
+            ctl03:
+              image: ${_param:salt_control_xenial_image}
+            dbs01:
+              image: ${_param:salt_control_xenial_image}
+            dbs02:
+              image: ${_param:salt_control_xenial_image}
+            dbs03:
+              image: ${_param:salt_control_xenial_image}
+            msg01:
+              image: ${_param:salt_control_xenial_image}
+            msg02:
+              image: ${_param:salt_control_xenial_image}
+            msg03:
+              image: ${_param:salt_control_xenial_image}
+            prx01:
+              image: ${_param:salt_control_xenial_image}
+            prx02:
+              image: ${_param:salt_control_xenial_image}
+              provider: kvm03.${_param:cluster_domain}
+  virt:
+    nic:
+      default:
+        eth1:
+          bridge: br-mgmt
+          model: virtio
+        eth0:
+          bridge: br-ctl
+          model: virtio
+      default_ext:
+        eth2:
+          bridge: br-mgmt
+          model: virtio
+        eth1:
+          bridge: br-ex
+          model: virtio
+        eth0:
+          bridge: br-ctl
+          model: virtio
+  glusterfs:
+    server:
+      service: glusterd
+      volumes:
+        nova_instances:
+          storage: /srv/glusterfs/nova_instances
+          replica: 3
+          bricks:
+            - ${_param:cluster_node01_address}:/srv/glusterfs/nova_instances
+            - ${_param:cluster_node02_address}:/srv/glusterfs/nova_instances
+            - ${_param:cluster_node03_address}:/srv/glusterfs/nova_instances
+          options:
+            cluster.readdir-optimize: 'True'
+            nfs.disable: 'True'
+            network.remote-dio: 'True'
+            cluster.favorite-child-policy: mtime
+            diagnostics.client-log-level: WARNING
+            diagnostics.brick-log-level: WARNING
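Because reclass deep-merges the parameters of all included classes, a
scenario model that includes this class can override the default
production sizing without editing it. A minimal sketch, assuming a
hypothetical child class for a resource-constrained deployment (the
class name and values are illustrative, not part of this change):

    # cluster/<scenario>/infra/kvm-small.yml (hypothetical)
    classes:
      - cluster.mcp-pike-common-ha.infra.kvm
    parameters:
      salt:
        control:
          size:
            # Shrinks only openstack.control; all other sizes are
            # inherited unchanged from the class above.
            openstack.control:
              cpu: 2
              ram: 6144
              disk_profile: small
              net_profile: default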