author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-08-01 22:18:41 +0200
committer  Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-08-17 02:59:30 +0200
commit     5039d069265df15ed3d8e41f7a1c7f9457a9d58a
tree       18a9160f72be9a01ef0008e3aa9912e18262057d    /mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra
parent     9720ddf955b76d678a08dc7ea53684400c659ce3
Bring in baremetal support
- ci/deploy.sh: fail if default scenario file is missing;
- start by copying reclass/classes/cluster/virtual-mcp-ocata-ovs as
  classes/cluster/baremetal-mcp-ocata-ovs;
- add new state (maas) that will handle MaaS configuration;
- split PXE network in two for baremetal:
  * rename old "pxe" virtual network to "mcpcontrol", make it
    non-configurable and identical for baremetal/virtual deploys;
  * new "pxebr" bridge is dedicated to the MaaS fabric network, which
    comes with its own DHCP, TFTP etc.;
- drop hardcoded PXE gateway & static IP for the MaaS node, since
  "mcpcontrol" remains a NAT-ed virtual network with its own DHCP;
- keep internet access available on first interfaces for cfg01/mas01;
- align MaaS IP addresses (all x.y.z.3), add public IP for easy debugging
  via the MaaS dashboard;
- add static IP in new network segment (192.168.11.3/24) on the MaaS
  node's PXE interface;
- set MaaS PXE interface MTU to 1500 (weird network errors with jumbo);
- MaaS node: add NAT iptables traffic forward from "mcpcontrol" to
  "pxebr" interfaces (sketched below);
- MaaS: add hardcoded lf-pod2 machine info (fixed indentation in v6);
- switch our targeted scenario to HA:
  * scenario: s/os-nosdn-nofeature-noha/os-nosdn-nofeature-ha/
- maas region: use mcp.rsa.pub from ~ubuntu/.ssh/authorized_keys;
- add route for 192.168.11.0/24 via mas01 on cfg01 (sketched below);
- fix race condition in kvm nodes' network setup:
  * add "noifupdown" support in the salt formula for linux.network;
  * keep primary eth/br-mgmt unconfigured till reboot;

TODO:
- read all this info from the PDF (Pod Descriptor File) later;
- investigate leftover references to eno2, eth3;
- add public network interface config, IPs;
- improve wait conditions for MaaS commission/deploy;
- report upstream breakage in system.single;

Change-Id: Ie8dd584b140991d2bd992acdfe47f5644bf51409
Signed-off-by: Michael Polenchuk <mpolenchuk@mirantis.com>
Signed-off-by: Guillermo Herrero <Guillermo.Herrero@enea.com>
Signed-off-by: Charalampos Kominos <Charalampos.Kominos@enea.com>
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
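The iptables forward and the static route mentioned above are implemented
outside the files in this diffstat. As a rough sketch of what they amount to
(not the patch itself), assuming mas01's "mcpcontrol" interface is ens3 and
its PXE/"pxebr"-facing interface is ens5 as declared in maas.yml below, and
that mas01's "mcpcontrol" address follows the x.y.z.3 convention
(192.168.10.3):

    # On mas01: allow and NAT traffic between the MaaS fabric and "mcpcontrol"
    sysctl -w net.ipv4.ip_forward=1
    iptables -t nat -A POSTROUTING -o ens3 -j MASQUERADE
    iptables -A FORWARD -i ens5 -o ens3 -j ACCEPT
    iptables -A FORWARD -i ens3 -o ens5 -j ACCEPT

    # On cfg01: reach the 192.168.11.0/24 MaaS fabric via mas01
    ip route add 192.168.11.0/24 via 192.168.10.3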
Diffstat (limited to 'mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra')
-rw-r--r--  mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml  | 132
-rw-r--r--  mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml    |  88
-rw-r--r--  mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml     | 150
-rw-r--r--  mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml    | 126
4 files changed, 496 insertions, 0 deletions
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml
new file mode 100644
index 000000000..77443deec
--- /dev/null
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/config.yml
@@ -0,0 +1,132 @@
+classes:
+- service.git.client
+- system.linux.system.single
+- system.linux.system.repo.mcp.salt
+- system.linux.system.repo.saltstack.xenial
+- system.salt.master.api
+- system.salt.master.pkg
+- system.salt.minion.ca.salt_master
+- system.reclass.storage.salt
+- system.reclass.storage.system.physical_control_cluster
+- system.reclass.storage.system.openstack_control_cluster
+- system.reclass.storage.system.openstack_proxy_cluster
+- system.reclass.storage.system.openstack_gateway_cluster
+- system.reclass.storage.system.openstack_database_cluster
+- system.reclass.storage.system.openstack_message_queue_cluster
+- system.reclass.storage.system.openstack_telemetry_cluster
+# - system.reclass.storage.system.stacklight_log_cluster
+# - system.reclass.storage.system.stacklight_monitor_cluster
+# - system.reclass.storage.system.stacklight_telemetry_cluster
+- system.reclass.storage.system.infra_maas_single
+- cluster.baremetal-mcp-ocata-ovs-ha.infra
+parameters:
+  _param:
+    salt_master_base_environment: prd
+    reclass_data_repository: local
+    salt_master_environment_repository: "https://github.com/tcpcloud"
+    salt_master_environment_revision: master
+    reclass_config_master: ${_param:infra_config_deploy_address}
+    single_address: ${_param:infra_config_address}
+    deploy_address: ${_param:infra_config_deploy_address}
+    salt_master_host: ${_param:infra_config_deploy_address}
+    salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+  linux:
+    network:
+      interface:
+        ens3: ${_param:linux_dhcp_interface}
+        ens4: ${_param:linux_single_interface}
+  salt:
+    master:
+      accept_policy: open_mode
+  reclass:
+    storage:
+      data_source:
+        engine: local
+      node:
+        infra_kvm_node01:
+          params:
+            keepalived_vip_priority: 100
+            linux_system_codename: xenial
+        infra_kvm_node02:
+          params:
+            keepalived_vip_priority: 101
+            linux_system_codename: xenial
+        infra_kvm_node03:
+          params:
+            keepalived_vip_priority: 102
+            linux_system_codename: xenial
+        openstack_telemetry_node01:
+          params:
+            linux_system_codename: xenial
+        openstack_telemetry_node02:
+          params:
+            linux_system_codename: xenial
+        openstack_telemetry_node03:
+          params:
+            linux_system_codename: xenial
+        openstack_message_queue_node01:
+          params:
+            linux_system_codename: xenial
+        openstack_message_queue_node02:
+          params:
+            linux_system_codename: xenial
+        openstack_message_queue_node03:
+          params:
+            linux_system_codename: xenial
+        openstack_proxy_node01:
+          params:
+            linux_system_codename: xenial
+        openstack_proxy_node02:
+          params:
+            linux_system_codename: xenial
+#        stacklight_log_node01:
+#          classes:
+#          - system.elasticsearch.client.single
+#        stacklight_monitor_node01:
+#          classes:
+#          - system.grafana.client.single
+#          - system.kibana.client.single
+        openstack_control_node01:
+          classes:
+          - cluster.${_param:cluster_name}.openstack.control_init
+          params:
+            linux_system_codename: xenial
+        openstack_control_node02:
+          params:
+            linux_system_codename: xenial
+        openstack_control_node03:
+          params:
+            linux_system_codename: xenial
+        openstack_database_node01:
+          classes:
+          - cluster.${_param:cluster_name}.openstack.database_init
+          params:
+            linux_system_codename: xenial
+        openstack_database_node02:
+          params:
+            linux_system_codename: xenial
+        openstack_database_node03:
+          params:
+            linux_system_codename: xenial
+        openstack_compute_node01:
+          name: ${_param:openstack_compute_node01_hostname}
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.openstack.compute
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: xenial
+            control_address: ${_param:openstack_compute_node01_control_address}
+            single_address: ${_param:openstack_compute_node01_single_address}
+            tenant_address: ${_param:openstack_compute_node01_tenant_address}
+        openstack_compute_node02:
+          name: ${_param:openstack_compute_node02_hostname}
+          domain: ${_param:cluster_domain}
+          classes:
+          - cluster.${_param:cluster_name}.openstack.compute
+          params:
+            salt_master_host: ${_param:reclass_config_master}
+            linux_system_codename: xenial
+            control_address: ${_param:openstack_compute_node02_control_address}
+            single_address: ${_param:openstack_compute_node02_single_address}
+            tenant_address: ${_param:openstack_compute_node02_tenant_address}
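config.yml only declares the reclass storage inventory; the per-node
definitions are generated on the Salt master from it. A minimal sketch of
regenerating and inspecting them on cfg01, assuming the stock
salt-formula-reclass state name (not shown in this diff):

    # Regenerate node definitions from the reclass:storage pillar above
    salt-call state.apply reclass.storage
    # Inspect the resulting inventory / top view
    reclass-salt --top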
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml
new file mode 100644
index 000000000..55ffcae12
--- /dev/null
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/init.yml
@@ -0,0 +1,88 @@
+classes:
+- system.linux.system.single
+- cluster.baremetal-mcp-ocata-ovs-ha.openstack
+# - cluster.baremetal-mcp-ocata-ovs-ha.stacklight
+# - cluster.baremetal-mcp-ocata-ovs-ha.stacklight.client
+parameters:
+  _param:
+    apt_mk_version: nightly
+    mcp_repo_version: 1.1
+    cluster_name: baremetal-mcp-ocata-ovs-ha
+    cluster_domain: ${_param:cluster_name}.local
+#    stacklight_environment: ${_param:cluster_domain}
+    reclass_data_revision: master
+    cluster_public_host: ${_param:openstack_proxy_address}
+    infra_config_hostname: cfg01
+    infra_maas_database_password: opnfv_secret
+
+    # infra service addresses
+    infra_config_address: 10.167.4.100
+    infra_config_deploy_address: 192.168.10.100
+    infra_maas_node01_address: 10.167.4.3
+    infra_maas_node01_deploy_address: 192.168.11.3
+    infra_maas_node01_external_address: 10.16.0.3
+    infra_compute_node01_address: 10.167.4.141
+    infra_compute_node02_address: 10.167.4.142
+    infra_compute_node03_address: 10.167.4.143
+
+    infra_kvm_address: 10.167.4.140
+    infra_kvm_node01_address: 10.167.4.141
+    infra_kvm_node02_address: 10.167.4.142
+    infra_kvm_node03_address: 10.167.4.143
+
+    infra_maas_node01_hostname: mas01
+    infra_kvm_node01_hostname: kvm01
+    infra_kvm_node02_hostname: kvm02
+    infra_kvm_node03_hostname: kvm03
+
+    # Interface definitions
+    reclass:
+      storage:
+        node:
+          name: default
+    linux_dhcp_interface:
+      enabled: true
+      type: eth
+      proto: dhcp
+    linux_single_interface:
+      enabled: true
+      type: eth
+      proto: static
+      address: ${_param:single_address}
+      netmask: 255.255.255.0
+
+    salt_control_xenial_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+    salt_api_password_hash: "$6$WV0P1shnoDh2gI/Z$22/Bcd7ffMv0jDlFpT63cAU4PiXHz9pjXwngToKwqAsgoeK4HNR3PiKaushjxp3JsQ8hNoJmAC6TxzVqfV8WH/"
+  linux:
+    network:
+      host:
+        cfg01:
+          address: ${_param:infra_config_address}
+          names:
+          - cfg01
+          - cfg01.${_param:cluster_domain}
+        cfg:
+          address: ${_param:infra_config_address}
+          names:
+          - ${_param:infra_config_hostname}
+          - ${_param:infra_config_hostname}.${_param:cluster_domain}
+        mas01:
+          address: ${_param:infra_maas_node01_address}
+          names:
+          - ${_param:infra_maas_node01_hostname}
+          - ${_param:infra_maas_node01_hostname}.${_param:cluster_domain}
+        kvm01:
+          address: ${_param:infra_kvm_node01_address}
+          names:
+          - ${_param:infra_kvm_node01_hostname}
+          - ${_param:infra_kvm_node01_hostname}.${_param:cluster_domain}
+        kvm02:
+          address: ${_param:infra_kvm_node02_address}
+          names:
+          - ${_param:infra_kvm_node02_hostname}
+          - ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
+        kvm03:
+          address: ${_param:infra_kvm_node03_address}
+          names:
+          - ${_param:infra_kvm_node03_hostname}
+          - ${_param:infra_kvm_node03_hostname}.${_param:cluster_domain}
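init.yml carries the _param values that the other infra classes interpolate,
plus static host entries that the linux formula renders into /etc/hosts. A
quick sanity check from cfg01 once minions are up (a sketch; any matching
target works):

    salt 'kvm01*' pillar.get _param:infra_kvm_node01_address    # expect 10.167.4.141
    salt 'kvm01*' pillar.get linux:network:host:mas01
    salt 'kvm01*' cmd.run 'getent hosts mas01'                  # rendered /etc/hosts entry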
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml
new file mode 100644
index 000000000..5c33f9ecd
--- /dev/null
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/kvm.yml
@@ -0,0 +1,150 @@
+classes:
+- system.linux.system.repo.mcp.openstack
+- system.linux.system.repo.mcp.extra
+- system.linux.system.repo.saltstack.xenial
+- service.keepalived.cluster.single
+- system.glusterfs.server.volume.glance
+- system.glusterfs.server.volume.keystone
+- system.glusterfs.server.cluster
+- system.salt.control.virt
+- system.salt.control.cluster.openstack_control_cluster
+- system.salt.control.cluster.openstack_proxy_cluster
+- system.salt.control.cluster.openstack_database_cluster
+- system.salt.control.cluster.openstack_message_queue_cluster
+- system.salt.control.cluster.openstack_telemetry_cluster
+# - system.salt.control.cluster.stacklight_server_cluster
+# - system.salt.control.cluster.stacklight_log_cluster
+# - system.salt.control.cluster.stacklight_telemetry_cluster
+- cluster.baremetal-mcp-ocata-ovs-ha.infra
+parameters:
+  _param:
+    linux_system_codename: xenial
+    cluster_vip_address: ${_param:infra_kvm_address}
+    cluster_node01_address: ${_param:infra_kvm_node01_address}
+    cluster_node02_address: ${_param:infra_kvm_node02_address}
+    cluster_node03_address: ${_param:infra_kvm_node03_address}
+    keepalived_vip_interface: br-ctl
+    keepalived_vip_virtual_router_id: 69
+    deploy_nic: enp6s0
+  salt:
+    control:
+      size: #RAM 4096,8192,16384,32768,65536
+        ##Default production sizing
+        openstack.control:
+          cpu: 6
+          ram: 8192
+          disk_profile: small
+          net_profile: default
+        openstack.database:
+          cpu: 6
+          ram: 8192
+          disk_profile: large
+          net_profile: default
+        openstack.message_queue:
+          cpu: 6
+          ram: 8192
+          disk_profile: small
+          net_profile: default
+        openstack.telemetry:
+          cpu: 4
+          ram: 4096
+          disk_profile: xxlarge
+          net_profile: default
+        openstack.proxy:
+          cpu: 4
+          ram: 4096
+          disk_profile: small
+          net_profile: default
+#        stacklight.log:
+#          cpu: 2
+#          ram: 4096
+#          disk_profile: xxlarge
+#          net_profile: default
+#        stacklight.server:
+#          cpu: 2
+#          ram: 4096
+#          disk_profile: small
+#          net_profile: default
+#        stacklight.telemetry:
+#          cpu: 2
+#          ram: 4096
+#          disk_profile: xxlarge
+#          net_profile: default
+      cluster:
+        internal:
+          node:
+            prx02:
+              provider: kvm03.${_param:cluster_domain}
+            mdb01:
+              image: ${_param:salt_control_xenial_image}
+            mdb02:
+              image: ${_param:salt_control_xenial_image}
+            mdb03:
+              image: ${_param:salt_control_xenial_image}
+            ctl01:
+              image: ${_param:salt_control_xenial_image}
+            ctl02:
+              image: ${_param:salt_control_xenial_image}
+            ctl03:
+              image: ${_param:salt_control_xenial_image}
+            dbs01:
+              image: ${_param:salt_control_xenial_image}
+            dbs02:
+              image: ${_param:salt_control_xenial_image}
+            dbs03:
+              image: ${_param:salt_control_xenial_image}
+            msg01:
+              image: ${_param:salt_control_xenial_image}
+            msg02:
+              image: ${_param:salt_control_xenial_image}
+            msg03:
+              image: ${_param:salt_control_xenial_image}
+            prx01:
+              image: ${_param:salt_control_xenial_image}
+            prx02:
+              image: ${_param:salt_control_xenial_image}
+  virt:
+    nic:
+      default:
+        eth0:
+          bridge: br-mgmt
+          model: virtio
+        eth1:
+          bridge: br-ctl
+          model: virtio
+  linux:
+    network:
+      interface:
+        eth3:
+          enabled: true
+          type: eth
+          proto: manual
+          address: 0.0.0.0
+          netmask: 255.255.255.0
+          name: ${_param:deploy_nic}
+          noifupdown: true
+        br-mgmt:
+          enabled: true
+          proto: dhcp
+          type: bridge
+          name_servers:
+          - 8.8.8.8
+          - 8.8.4.4
+          use_interfaces:
+          - ${_param:deploy_nic}
+          noifupdown: true
+        vlan300:
+          enabled: true
+          proto: manual
+          type: vlan
+          name: ${_param:deploy_nic}.300
+          use_interfaces:
+          - ${_param:deploy_nic}
+        br-ctl:
+          enabled: true
+          type: bridge
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+          use_interfaces:
+          - ${_param:deploy_nic}.300
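The interface section above is the race-condition fix from the commit
message: the physical deploy NIC ("${_param:deploy_nic}") is declared
noifupdown and left untouched until reboot, br-mgmt takes DHCP over it, and
br-ctl holds the static control address on VLAN 300. A sketch of verifying
the layout on a kvm node after reboot (interface names follow the pillar
above):

    ip -br addr show dev br-mgmt      # DHCP address on the management bridge
    ip -br addr show dev br-ctl       # static ${_param:single_address} on the control bridge
    ip -d link show dev enp6s0.300    # VLAN 300 sub-interface on the deploy NIC
    bridge link                       # ports attached to br-mgmt / br-ctl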
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml
new file mode 100644
index 000000000..7fc45e23b
--- /dev/null
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml
@@ -0,0 +1,126 @@
+classes:
+- system.linux.system.repo.saltstack.xenial
+- system.maas.region.single
+- cluster.baremetal-mcp-ocata-ovs-ha.infra
+parameters:
+  _param:
+    dhcp_interface: ens3
+    primary_interface: ens4
+    pxe_interface: ens5
+    external_interface: ens6
+    interface_mtu: 1500
+    # MaaS has issues using MTU > 1500 for PXE interface
+    pxe_interface_mtu: 1500
+    linux_system_codename: xenial
+    maas_admin_username: opnfv
+    maas_admin_password: opnfv_secret
+    maas_db_password: opnfv_secret
+    dns_server01: 8.8.4.4
+    single_address: ${_param:infra_maas_node01_deploy_address}
+  maas:
+    region:
+      salt_master_ip: ${_param:infra_config_deploy_address}
+      domain: ${_param:cluster_domain}
+      maas_config:
+        commissioning_distro_series: 'xenial'
+        default_distro_series: 'xenial'
+        default_osystem: 'ubuntu'
+        default_storage_layout: 'lvm'
+        disk_erase_with_secure_erase: false
+        dnssec_validation: 'no'
+        enable_third_party_drivers: true
+        network_discovery: 'enabled'
+        default_min_hwe_kernel: 'hwe-16.04'
+      subnets:
+        # TODO: parametrize address root (192.168.11), fabric-2, dhcp start/end?
+        192.168.11.0/24:
+          fabric: fabric-2
+          cidr: 192.168.11.0/24
+          gateway_ip: ${_param:single_address}
+          iprange:
+            start: 192.168.11.5
+            end: 192.168.11.250
+      machines:
+        kvm01:
+          interface:
+            mac: "00:25:b5:a0:00:2a"
+          power_parameters:
+            power_address: "172.30.8.75"
+            power_password: "octopus"
+            power_type: ipmi
+            power_user: "admin"
+          architecture: 'amd64/generic'
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+        kvm02:
+          interface:
+            mac: "00:25:b5:a0:00:3a"
+          power_parameters:
+            power_address: "172.30.8.65"
+            power_password: "octopus"
+            power_type: ipmi
+            power_user: "admin"
+          architecture: 'amd64/generic'
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+        kvm03:
+          interface:
+            mac: "00:25:b5:a0:00:4a"
+          power_parameters:
+            power_address: "172.30.8.74"
+            power_password: "octopus"
+            power_type: ipmi
+            power_user: "admin"
+          architecture: 'amd64/generic'
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+        cmp001:
+          interface:
+            mac: "00:25:b5:a0:00:5a"
+          power_parameters:
+            power_address: "172.30.8.73"
+            power_password: "octopus"
+            power_type: ipmi
+            power_user: "admin"
+          architecture: 'amd64/generic'
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+        cmp002:
+          interface:
+            mac: "00:25:b5:a0:00:6a"
+          power_parameters:
+            power_address: "172.30.8.72"
+            power_password: "octopus"
+            power_type: ipmi
+            power_user: "admin"
+          architecture: 'amd64/generic'
+          distro_series: xenial
+          hwe_kernel: hwe-16.04
+  linux:
+    network:
+      interface:
+        dhcp_interface: ${_param:linux_dhcp_interface}
+        primary_interface:
+          enabled: true
+          name: ${_param:primary_interface}
+          mtu: ${_param:interface_mtu}
+          proto: static
+          address: ${_param:infra_maas_node01_address}
+          netmask: 255.255.255.0
+          type: eth
+        pxe_interface:
+          enabled: true
+          name: ${_param:pxe_interface}
+          mtu: ${_param:pxe_interface_mtu}
+          proto: static
+          address: ${_param:single_address}
+          netmask: 255.255.255.0
+          type: eth
+        external_interface:
+          enabled: true
+          name: ${_param:external_interface}
+          mtu: ${_param:interface_mtu}
+          proto: static
+          address: ${_param:infra_maas_node01_external_address}
+          netmask: 255.255.255.0
+          type: eth
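maas.yml is the pillar behind the new "maas" state from the commit message;
the machines listed above are what MaaS enlists and commissions over the
192.168.11.0/24 fabric. A sketch of driving and checking it, assuming
salt-formula-maas's maas.region state and a MAAS CLI profile named after
maas_admin_username (the API key is a placeholder, not a value from this
patch):

    # Apply the MaaS region configuration on the MaaS node
    salt 'mas01*' state.apply maas.region
    # Watch enlistment/commissioning status via the MAAS 2.x CLI
    maas login opnfv http://localhost:5240/MAAS/api/2.0/ <api-key>
    maas opnfv machines read | grep -E '"(hostname|status_name)"'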