path: root/mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml
author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>  2018-02-01 00:28:17 +0100
committer  Alexandru Avadanii <Alexandru.Avadanii@enea.com>  2018-02-05 06:04:07 +0100
commit     6acecf3b104a072c60d071364344b9ff04994168 (patch)
tree       fa2e0bede90a83464f0818c07f38c06b77bdf42b /mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml
parent     cefce150621699e9d9d3ac5c884a28ee4766c24d (diff)
[baremetal] Rename all to drop baremetal prefix
A few things differ between baremetal and virtual nodes:
- provisioning method;
- network setup;

Since we now support completely dynamic network config based on PDF + IDF,
dynamic provisioning of VMs on the jumpserver (as virtual cluster nodes),
and MaaS-driven baremetal provisioning, let's drop the 'baremetal-' prefix
from cluster model names and prepare for unified scenarios.

Note that some limitations still apply, e.g. virtual nodes are spawned only
on the jumpserver (localhost) for now.

JIRA: FUEL-310

Change-Id: If20077ac37c6f15961468abc58db7e16f2c29260
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
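As a rough sketch of what the rename means for reclass class includes, a
scenario class that previously pulled in a 'baremetal-' prefixed cluster model
would now reference the prefix-less one. The model names below are illustrative
examples based on the commit message, not taken from this diff:

    # before (assumed old, prefixed model name)
    classes:
      - cluster.baremetal-mcp-pike-ovs-ha.openstack
    # after (prefix dropped)
    classes:
      - cluster.mcp-pike-ovs-ha.openstack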
Diffstat (limited to 'mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml')
-rw-r--r--  mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml  112
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml b/mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml
new file mode 100644
index 000000000..cf09a4123
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-pike-common-ha/openstack_control.yml
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+  - system.linux.system.repo.glusterfs
+  - system.ceilometer.client
+  - system.memcached.server.single
+  - system.keystone.server.cluster
+  - system.keystone.server.wsgi
+  - system.glance.control.cluster
+  - system.nova.control.cluster
+  - system.cinder.control.cluster
+  - system.cinder.control.backend.lvm
+  - system.heat.server.cluster
+  - system.designate.server.cluster
+  - system.designate.server.backend.bind
+  - system.bind.server.single
+  - system.haproxy.proxy.listen.openstack.nova-placement
+  - system.haproxy.proxy.listen.openstack.glare
+  - system.glusterfs.client.cluster
+  - system.glusterfs.client.volume.glance
+  - system.glusterfs.client.volume.keystone
+parameters:
+  _param:
+    keepalived_vip_interface: ${_param:single_nic}
+    keepalived_vip_virtual_router_id: 50
+    cluster_vip_address: ${_param:openstack_control_address}
+    cluster_local_address: ${_param:single_address}
+    cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
+    cluster_node01_address: ${_param:openstack_control_node01_address}
+    cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
+    cluster_node02_address: ${_param:openstack_control_node02_address}
+    cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
+    cluster_node03_address: ${_param:openstack_control_node03_address}
+    nova_vncproxy_url: https://${_param:cluster_public_host}:6080
+    glusterfs_version: '3.13'
+  heat:
+    server:
+      metadata:
+        host: ${_param:openstack_proxy_control_address}
+        port: 8000
+        protocol: http
+      waitcondition:
+        host: ${_param:openstack_proxy_control_address}
+        port: 8000
+        protocol: http
+      watch:
+        host: ${_param:openstack_proxy_control_address}
+        port: 8003
+        protocol: http
+  nova:
+    controller:
+      pkgs:
+        - nova-api
+        - nova-conductor
+        - nova-consoleauth
+        - nova-novncproxy
+        - nova-scheduler
+        - python-novaclient
+  neutron:
+    server:
+      vlan_aware_vms: true
+  keystone:
+    server:
+      cacert: /etc/ssl/certs/mcp_os_cacert
+  bind:
+    server:
+      control:
+        mgmt:
+          enabled: true
+          bind:
+            address: ${_param:single_address}
+            port: 953
+          allow:
+            - ${_param:openstack_control_node01_address}
+            - ${_param:openstack_control_node02_address}
+            - ${_param:openstack_control_node03_address}
+          keys:
+            - designate
+  designate:
+    server:
+      pools:
+        default:
+          description: 'test pool'
+          targets:
+            default:
+              description: 'test target1'
+            default1:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target2'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_control_node02_address}
+                port: 53
+                rndc_host: ${_param:openstack_control_node02_address}
+                rndc_port: 953
+                rndc_key_file: /etc/designate/rndc.key
+            default2:
+              type: ${_param:designate_pool_target_type}
+              description: 'test target3'
+              masters: ${_param:designate_pool_target_masters}
+              options:
+                host: ${_param:openstack_control_node03_address}
+                port: 53
+                rndc_host: ${_param:openstack_control_node03_address}
+                rndc_port: 953
+                rndc_key_file: /etc/designate/rndc.key
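Reclass resolves class includes as dotted paths relative to the classes root,
so the file added above is consumed by other classes in the model via its
dotted name, and the ${_param:...} placeholders are interpolated from values
defined elsewhere in the cluster model. A minimal usage sketch (the including
node class is hypothetical, shown only to illustrate the include convention):

    # e.g. in a controller node class of this cluster model
    classes:
      - cluster.mcp-pike-common-ha.openstack_control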