author    Alexandru Avadanii <Alexandru.Avadanii@enea.com>  2019-10-07 17:07:31 +0200
committer Alexandru Avadanii <Alexandru.Avadanii@enea.com>  2019-10-25 00:12:51 +0200
commit    fab511daabb05c493943b5f7a2ea8b5fe0802e2d (patch)
tree      b9101bf4a534e6d76f93c1eb0ea2f774848f9075 /mcp
parent    8e3a4154830c60b98636ec7a4c61002cbb882828 (diff)
[baremetal] Stein, Bionic, py3 support
Change-Id: If3f8cb6bfeedeb766a050d5a271b21c90bb3ba1c
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Diffstat (limited to 'mcp')
-rwxr-xr-x  mcp/config/states/baremetal_init |   5
-rwxr-xr-x  mcp/config/states/openstack_ha |   8
-rwxr-xr-x  mcp/config/states/virtual_control_plane |   3
-rw-r--r--  mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch |   2
-rw-r--r--  mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch |   2
-rw-r--r--  mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch |   2
-rw-r--r--  mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch |   2
-rw-r--r--  mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch |   2
-rw-r--r--  mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch | 213
-rw-r--r--  mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch |  34
-rw-r--r--  mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch |  44
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml |  32
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2 |   1
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2 |  17
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2 |   4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2 |  35
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2 |  33
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml |  12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2 |  34
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2 |   5
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2 |   1
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2 |   4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2 |   4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2 |   4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2 |   2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2 |   5
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2 |   2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml |   2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml |   2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml |   2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml |   2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml |   2
m---------  mcp/scripts/pharos |   0
33 files changed, 445 insertions(+), 77 deletions(-)
diff --git a/mcp/config/states/baremetal_init b/mcp/config/states/baremetal_init
index 992821571..dcedfbeda 100755
--- a/mcp/config/states/baremetal_init
+++ b/mcp/config/states/baremetal_init
@@ -25,9 +25,12 @@ salt -C "${cluster_nodes_query}" file.replace $debian_ip_source \
pattern="^\s{8}__salt__\['pkg.install'\]\('vlan'\)" \
repl="\n if not __salt__['pkg.version']('vlan'):\n __salt__['pkg.install']('vlan')"
-salt -C "${cluster_nodes_query}" pkg.install bridge-utils
salt -C "${cluster_nodes_query}" state.apply linux.system.repo
+salt -C "${cluster_nodes_query}" pkg.install force_yes=true bridge-utils,python-jinja2
+salt -C "${cluster_nodes_query}" service.restart salt-minion
wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply salt.minion"
+salt -C "${cluster_nodes_query}" file.remove /etc/resolv.conf
+salt -C "${cluster_nodes_query}" file.touch /etc/resolv.conf
wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply linux,ntp"
wait_for 30.0 "salt -C '${cluster_nodes_query}' test.ping"
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
index 7cf40086e..c28d0b46b 100755
--- a/mcp/config/states/openstack_ha
+++ b/mcp/config/states/openstack_ha
@@ -66,6 +66,12 @@ salt -I 'barbican:server:role:primary' state.sls barbican
salt -I 'barbican:server:role:secondary' state.sls barbican
salt -I 'barbican:client' state.sls barbican
+# remove config files coming from packages
+for service in gnocchi panko; do
+ salt -I "${service}:server" pkg.install ${service}-api
+ salt -I "${service}:server" file.remove "/etc/apache2/sites-enabled/${service}-api.conf"
+done
+
salt -I 'redis:cluster:role:master' state.sls redis
salt -I 'redis:server' state.sls redis
salt -I 'gnocchi:server:role:primary' state.sls gnocchi
@@ -77,7 +83,7 @@ salt -I 'aodh:server:role:secondary' state.sls aodh
salt -I 'ceilometer:server' state.sls ceilometer
salt -I 'ceilometer:agent' state.sls ceilometer
-salt -I 'horizon:server' state.sls apache,horizon
+wait_for 3.0 "salt -I 'horizon:server' state.sls apache,horizon"
salt -I 'nginx:server' state.sls nginx
cluster_public_host=$(salt -C 'I@nginx:server and *01*' --out=yaml \
diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane
index 183360b96..f2e861ac2 100755
--- a/mcp/config/states/virtual_control_plane
+++ b/mcp/config/states/virtual_control_plane
@@ -48,6 +48,9 @@ cd /srv/salt/env/prd/maas/files && ln -sf \
salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' cp.get_file \
"salt://maas/files/$(basename "${APT_CONF_D_CURTIN}")" "${APT_CONF_D_CURTIN}"
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.sls linux.system.repo
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.install force_yes=true python-jinja2
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' service.restart salt-minion
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp"
diff --git a/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch b/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch
index f238978f2..eb607cf81 100644
--- a/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch
+++ b/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch b/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch
index 10dfddf48..3d8deff60 100644
--- a/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch
+++ b/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch b/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch
index aa4d95deb..1be210f3c 100644
--- a/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch
+++ b/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch b/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch
index cd9ea2edc..978f4ecb6 100644
--- a/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch
+++ b/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch b/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch
index 77d8ff789..e59c2f2e8 100644
--- a/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch
+++ b/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch b/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch
new file mode 100644
index 000000000..1f53697ad
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch
@@ -0,0 +1,213 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Wed, 16 Oct 2019 15:02:39 +0200
+Subject: [PATCH] curtin: Add Bionic support
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ .../curtin_userdata_amd64_generic_bionic | 80 +++++++++++++++++++
+ .../curtin_userdata_arm64_generic_bionic | 65 +++++++++++++++
+ maas/region.sls | 24 ++++++
+ 3 files changed, 169 insertions(+)
+ create mode 100644 maas/files/curtin_userdata_amd64_generic_bionic
+ create mode 100644 maas/files/curtin_userdata_arm64_generic_bionic
+
+diff --git a/maas/files/curtin_userdata_amd64_generic_bionic b/maas/files/curtin_userdata_amd64_generic_bionic
+new file mode 100644
+index 0000000..0b6fd15
+--- /dev/null
++++ b/maas/files/curtin_userdata_amd64_generic_bionic
+@@ -0,0 +1,80 @@
++{%- from "maas/map.jinja" import cluster with context %}
++{%- raw %}
++#cloud-config
++debconf_selections:
++ maas: |
++ {{for line in str(curtin_preseed).splitlines()}}
++ {{line}}
++ {{endfor}}
++{{if third_party_drivers and driver}}
++early_commands:
++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
++{{endif}}
++late_commands:
++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null']
++{% endraw %}
++
++{%- if cluster.get('saltstack_repo_key', False) %}
++ {% set salt_repo_key = salt['hashutil.base64_b64encode'](cluster.saltstack_repo_key) %}
++ apt_00_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo '{{salt_repo_key}}' | base64 -d | apt-key add -"]
++{%- endif %}
++{% if cluster.saltstack_repo_bionic.startswith('deb') %}
++ {%- set saltstack_repo = cluster.saltstack_repo_bionic -%}
++{%- else %}
++ {%- set saltstack_repo = 'deb [arch=amd64] ' + cluster.saltstack_repo_bionic -%}
++{%- endif %}
++
++ apt_01_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo '{{ saltstack_repo }}' >> /etc/apt/sources.list.d/mcp_saltstack.list"]
++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:amd64:bionic:extra_pkgs:enabled')|default(false) %}
++ {% for pkg in pillar.maas.cluster.curtin_vars.amd64.bionic.extra_pkgs.pkgs -%}
++ apt_04_install_pkgs_{{ loop.index }}: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{ pkg }}"]
++ {% endfor %}
++{%- endif %}
++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion", "ifupdown", "cloud-init", "dnsmasq"]
++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion.d/minion.conf"]
++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion.d/minion.conf"]
++ salt_05_max_event_size: ["curtin", "in-target", "--", "sh", "-c", "echo 'max_event_size: 100000000' >> /etc/salt/minion.d/minion.conf"]
++ salt_06_acceptance_wait_time_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time_max: 60' >> /etc/salt/minion.d/minion.conf"]
++ salt_07_acceptance_wait_time: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time: 10' >> /etc/salt/minion.d/minion.conf"]
++ salt_08_random_reauth_delay: ["curtin", "in-target", "--", "sh", "-c", "echo 'random_reauth_delay: 270' >> /etc/salt/minion.d/minion.conf"]
++ salt_09_recon_default: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_default: 1000' >> /etc/salt/minion.d/minion.conf"]
++ salt_10_recon_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_max: 60000' >> /etc/salt/minion.d/minion.conf"]
++ salt_11_recon_randomize: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_randomize: True' >> /etc/salt/minion.d/minion.conf"]
++ salt_12_auth_timeout: ["curtin", "in-target", "--", "sh", "-c", "echo 'auth_timeout: 60' >> /etc/salt/minion.d/minion.conf"]
++
++ salt_20_bionic_nplan_stop: ["curtin", "in-target", "--", "systemctl", "stop", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_21_bionic_nplan_disable: ["curtin", "in-target", "--", "systemctl", "disable", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_22_bionic_nplan_mask: ["curtin", "in-target", "--", "systemctl", "mask", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_23_bionic_nplan_purge: ["curtin", "in-target", "--", "apt", "--assume-yes", "purge", "nplan", "netplan.io"]
++ salt_24_bionic_interfaces: ["curtin", "in-target", "--", "sh", "-c", "echo 'source /etc/network/interfaces.d/*' >> /etc/network/interfaces"]
++ salt_25_bionic_networking_unmask: ["curtin", "in-target", "--", "systemctl", "unmask", "networking.service"]
++ salt_26_bionic_networking_enable: ["curtin", "in-target", "--", "systemctl", "enable", "networking.service"]
++ salt_27_bionic_networking_start: ["curtin", "in-target", "--", "systemctl", "start", "networking.service"]
++
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:amd64:bionic:kernel_package:enabled')|default(false) %}
++kernel:
++ package: {{ pillar.maas.cluster.curtin_vars.amd64.bionic.kernel_package.value }}
++{%- endif %}
++
++{% raw %}
++{{if third_party_drivers and driver}}
++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
++{{endif}}
++{% endraw %}
++
++{#
++# vim: ft=jinja
++#}
+diff --git a/maas/files/curtin_userdata_arm64_generic_bionic b/maas/files/curtin_userdata_arm64_generic_bionic
+new file mode 100644
+index 0000000..c7cb997
+--- /dev/null
++++ b/maas/files/curtin_userdata_arm64_generic_bionic
+@@ -0,0 +1,65 @@
++{%- from "maas/map.jinja" import cluster with context %}
++{% raw %}
++#cloud-config
++debconf_selections:
++ maas: |
++ {{for line in str(curtin_preseed).splitlines()}}
++ {{line}}
++ {{endfor}}
++{{if third_party_drivers and driver}}
++early_commands:
++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
++{{endif}}
++late_commands:
++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null']
++{% endraw %}
++{%- if cluster.get('saltstack_repo_key', False) %}
++ {% set salt_repo_key = salt['hashutil.base64_b64encode'](cluster.saltstack_repo_key) %}
++ apt_00_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo '{{salt_repo_key}}' | base64 -d | apt-key add -"]
++{%- endif %}
++{% if cluster.saltstack_repo_bionic.startswith('deb') %}
++ {%- set saltstack_repo = cluster.saltstack_repo_bionic -%}
++{%- else %}
++ {%- set saltstack_repo = 'deb [arch=amd64] ' + cluster.saltstack_repo_bionic -%}
++{%- endif %}
++{#- NOTE: Re-use amd64 repos on arm64 since most packages are arch independent #}
++ apt_01_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo '{{ saltstack_repo }}' >> /etc/apt/sources.list.d/mcp_saltstack.list"]
++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion", "python-futures", "ifupdown", "cloud-init", "dnsmasq"]
++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion.d/minion.conf"]
++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion.d/minion.conf"]
++ salt_05_max_event_size: ["curtin", "in-target", "--", "sh", "-c", "echo 'max_event_size: 100000000' >> /etc/salt/minion.d/minion.conf"]
++ salt_06_acceptance_wait_time_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time_max: 60' >> /etc/salt/minion.d/minion.conf"]
++ salt_07_acceptance_wait_time: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time: 10' >> /etc/salt/minion.d/minion.conf"]
++ salt_08_random_reauth_delay: ["curtin", "in-target", "--", "sh", "-c", "echo 'random_reauth_delay: 270' >> /etc/salt/minion.d/minion.conf"]
++ salt_09_recon_default: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_default: 1000' >> /etc/salt/minion.d/minion.conf"]
++ salt_10_recon_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_max: 60000' >> /etc/salt/minion.d/minion.conf"]
++ salt_11_recon_randomize: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_randomize: True' >> /etc/salt/minion.d/minion.conf"]
++ salt_12_auth_timeout: ["curtin", "in-target", "--", "sh", "-c", "echo 'auth_timeout: 60' >> /etc/salt/minion.d/minion.conf"]
++
++ salt_20_bionic_nplan_stop: ["curtin", "in-target", "--", "systemctl", "stop", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_21_bionic_nplan_disable: ["curtin", "in-target", "--", "systemctl", "disable", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_22_bionic_nplan_mask: ["curtin", "in-target", "--", "systemctl", "mask", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_23_bionic_nplan_purge: ["curtin", "in-target", "--", "apt", "--assume-yes", "purge", "nplan", "netplan.io"]
++ salt_24_bionic_interfaces: ["curtin", "in-target", "--", "sh", "-c", "echo 'source /etc/network/interfaces.d/*' >> /etc/network/interfaces"]
++ salt_25_bionic_networking_unmask: ["curtin", "in-target", "--", "systemctl", "unmask", "networking.service"]
++ salt_26_bionic_networking_enable: ["curtin", "in-target", "--", "systemctl", "enable", "networking.service"]
++ salt_27_bionic_networking_start: ["curtin", "in-target", "--", "systemctl", "start", "networking.service"]
++
++{% raw %}
++{{if third_party_drivers and driver}}
++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
++{{endif}}
++{% endraw %}
+diff --git a/maas/region.sls b/maas/region.sls
+index 52fb952..ca876ee 100644
+--- a/maas/region.sls
++++ b/maas/region.sls
+@@ -138,6 +138,30 @@ maas_apache_headers:
+ - require:
+ - pkg: maas_region_packages
+
++/etc/maas/preseeds/curtin_userdata_amd64_generic_bionic:
++ file.managed:
++ - source: salt://maas/files/curtin_userdata_amd64_generic_bionic
++ - template: jinja
++ - user: root
++ - group: root
++ - mode: 644
++ - context:
++ salt_master_ip: {{ region.salt_master_ip }}
++ - require:
++ - pkg: maas_region_packages
++
++/etc/maas/preseeds/curtin_userdata_arm64_generic_bionic:
++ file.managed:
++ - source: salt://maas/files/curtin_userdata_arm64_generic_bionic
++ - template: jinja
++ - user: root
++ - group: root
++ - mode: 644
++ - context:
++ salt_master_ip: {{ region.salt_master_ip }}
++ - require:
++ - pkg: maas_region_packages
++
+ Configure /root/.pgpass for MAAS:
+ file.managed:
+ - name: /root/.pgpass
diff --git a/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch b/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch
new file mode 100644
index 000000000..f7b85f5ab
--- /dev/null
+++ b/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch
@@ -0,0 +1,34 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Fri, 18 Oct 2019 17:07:13 +0200
+Subject: [PATCH] Stop epmd.socket before relaunching rabbit service
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ rabbitmq/server/service.sls | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/rabbitmq/server/service.sls b/rabbitmq/server/service.sls
+index 3e95a30..05f2eb1 100644
+--- a/rabbitmq/server/service.sls
++++ b/rabbitmq/server/service.sls
+@@ -58,6 +58,12 @@ rabbitmq_limits_systemd:
+ - require:
+ - pkg: rabbitmq_server
+
++rabbitmq_epmd_socket:
++ service.dead:
++ - name: epmd.socket
++ - require:
++ - pkg: rabbitmq_server
++
+ {%- endif %}
+
+ {%- if server.secret_key is defined and not grains.get('noservices', False) %}
diff --git a/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch b/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch
new file mode 100644
index 000000000..1c06bd9d3
--- /dev/null
+++ b/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch
@@ -0,0 +1,44 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 24 Oct 2019 23:04:16 +0200
+Subject: [PATCH] Add Ubuntu Bionic support
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ redis/map.jinja | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/redis/map.jinja b/redis/map.jinja
+index 299d5a4..7cbceb4 100755
+--- a/redis/map.jinja
++++ b/redis/map.jinja
+@@ -37,6 +37,9 @@
+ 'xenial': {
+ 'version': '3.0',
+ },
++ 'bionic': {
++ 'version': '3.0',
++ },
+ }, grain='oscodename', merge=salt['pillar.get']('redis:server'))) %}
+
+ {% set cluster = salt['grains.filter_by']({
+@@ -60,4 +63,12 @@
+ 'port': '26379'
+ }
+ },
++ 'bionic': {
++ 'pkgs': ['redis-sentinel'],
++ 'service': 'redis-sentinel',
++ 'sentinel': {
++ 'address': '127.0.0.1',
++ 'port': '26379'
++ }
++ },
+ }, grain='oscodename', merge=salt['pillar.get']('redis:cluster'))) %}
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml b/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml
index e2de54dc8..04b38abae 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml
@@ -12,9 +12,41 @@ classes:
parameters:
_param:
backports_version: rocky
+ fakeinitscripts_ppa_key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+ xsFNBFwKq9ABEADqWu9anJFs3RJ87i53tU8lBC8JGa55YmRlN7LgvkPYMtXj3xOR
+ tBn8HJ3B0b2fKx2htUs+oWtFFCkNUmptnNz+tMVdwXt1lXSr2MEzO6PgBBAvak0j
+ GMLSsI4p60YqoPARMjPXvZ+VNcGZ6RSOKlNnEqSb+M76iaVaqEWBipDR1g+llCd9
+ lgUVQ8iKolw+5iCnPnjmm0GdE9iw7Az0aUIv3yXNaEZwnGb9egdoioY4OvkY9HqR
+ KkgsrTVBWiTOsoDctrPkLNsB1BZLA/Qkgv4Sih2Bc7atgid6SvvuGClex+9MdBPQ
+ r0nT03O0uiXQ4Zk/ULlXaE2ci9dhMD5SNspgZnEULcubqL/Xd2iq6DlW22iXmj2X
+ PSoF6YxrtxlocaC2ChKFGITR7yiudxDYSCyBzXBMP7zfLVwZC3IX309HaxJRPCk5
+ PEatmq0++z3lWfNXEjQ48Rt0mYTC5ktcJQGpSSp30hjrIfz5Jxa/FACQCJBGbr0/
+ jO6cB6TJpHDnwdsEvCLJmeI6+OYkEzExarL8Wg8DdQUo5uppS4zANAgMsUbVqFz5
+ 7WDlLMKPRAheEdZJIwCHXZrB3TibZTNUuafmQD+4a50cfKgNHlb+ks/5gbkxRdNj
+ DdZYI6gbh7PZcvIKOvakrEer8RIpqgSXyWPxIviyCGpp/+webUyapFwstQARAQAB
+ zRxMYXVuY2hwYWQgUFBBIGZvciBPUE5GViBGdWVswsF4BBMBAgAiBQJcCqvQAhsD
+ BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCVWdwe/i1a+JgbEADZPwsdXStw
+ kqS+bg+bL4sCK55LnYAPWWnqXLuqpEEXusuGYEyahu69SOidL3/AXY1iM9FnbBE0
+ qyycLQVOv/lt7Bs1WVg7M3gNjTsnCH7RbZsGVWDnOuZ1G0KP2o72dmrR9GYFArHA
+ MMc3YVoKAWhRBWHUKdSp/D68i/cfJ4V1PNhDpchOz4ytPjo2xyHyBW+wxLxNiC32
+ 3uZeT7EpO8UbhuFDd3+PLaNrI1p2mkYxdmTpVBLIdKdAMq1QYi0B1nLvJ7Cp2yck
+ 2HKrI6pb74l7dkQOxx+x/inAMbZKX/AvKSjzyJ+Fxc4TT28m79QLuHtORiaPWCep
+ HePcl/0Qu2n85qOtWbWFWCJwlmvfTkHw2u7PEjutTgX9zOLdEFliu3v9nhvec7Mk
+ AzwpilBD6eAHav8Yhx6CKNR5GReK3viJ8+lso/D/56ap7el+W+M6K59imJ/r8WVx
+ 79qPXTAB29Co8hC5ky2qqeHMHw39VqC/JpCYPjH7qZNyWWhXBwHcobktuCc+tXdq
+ t1qlTz0aU/DLGUW8Buk9R6ZZTvSUibT8tRqDYtVhyJ7u/2qCdqhFoculWr6e6DQF
+ KP41NGKN4LtqQh7HmFCswvBnlu7BpkVlBqlHEMpqRUbJd7fg0oGkEf6P8hhWwdd2
+ 0keWK/lCMRHDEN6+/1ppP7M90/JyUPXfFA==
+ =stQK
+ -----END PGP PUBLIC KEY BLOCK-----
linux:
system:
repo:
+ opnfv_fakeinitscripts:
+ source: "deb http://ppa.launchpad.net/opnfv-fuel/fakeinitscripts/ubuntu ${_param:linux_system_codename} main"
+ key: ${_param:fakeinitscripts_ppa_key}
mirantis_openstack:
# yamllint disable-line rule:line-length
source: "deb ${_param:linux_system_repo_url}/openstack-${_param:backports_version}/xenial xenial main"
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
index 17a89f0e8..1178843d9 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
@@ -24,6 +24,7 @@ parameters:
salt_master_base_environment: prd
# yamllint disable-line rule:line-length
salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+ infra_maas_system_codename: bionic
linux:
system:
user:
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
index fa85ab79c..b5cb3fece 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
@@ -16,11 +16,11 @@ classes:
- cluster.all-mcp-arch-common.opnfv.pod_config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
maas_admin_username: opnfv
dns_server01: '{{ nm.dns_public[0] }}'
single_address: ${_param:infra_maas_node01_deploy_address}
- hwe_kernel: 'hwe-16.04'
+ hwe_kernel: 'ga-18.04'
opnfv_maas_timeout_comissioning: {{ nm.maas_timeout_comissioning }}
opnfv_maas_timeout_deploying: {{ nm.maas_timeout_deploying }}
maas:
@@ -43,7 +43,7 @@ parameters:
url: http://images.maas.io/ephemeral-v3/daily
keyring_file: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
boot_sources_selections:
- xenial:
+ bionic:
url: "http://images.maas.io/ephemeral-v3/daily"
os: "ubuntu"
release: "${_param:linux_system_codename}"
@@ -53,8 +53,7 @@ parameters:
{%- endfor %}
subarches:
- "generic"
- - "ga-16.04"
- - "hwe-16.04"
+ - "ga-18.04"
labels: '"*"'
fabrics:
pxe_admin:
@@ -85,7 +84,7 @@ parameters:
armband:
name: armband
enabled: '1'
- url: 'http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/${_param:linux_system_codename}'
+ url: 'http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial'
distributions: '${_param:armband_repo_version}-armband'
components: 'main'
arches: 'arm64'
@@ -98,8 +97,8 @@ parameters:
active_discovery_interval: 600
ntp_external_only: true
upstream_dns: ${_param:dns_server01}
- commissioning_distro_series: 'xenial'
- default_distro_series: 'xenial'
+ commissioning_distro_series: 'bionic'
+ default_distro_series: 'bionic'
default_osystem: 'ubuntu'
default_storage_layout: 'lvm'
enable_http_proxy: true
@@ -109,7 +108,7 @@ parameters:
network_discovery: 'enabled'
default_min_hwe_kernel: ${_param:hwe_kernel}
cluster:
- saltstack_repo_xenial: "deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/ xenial main"
+ saltstack_repo_bionic: "deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7/ bionic main"
region:
host: ${_param:single_address}
port: 5240
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
index f9c2f20ec..fc5bbaa7b 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
@@ -14,13 +14,13 @@ classes:
parameters:
_param:
openstack_version: stein
- armband_repo_version: ${_param:openstack_version}
+ armband_repo_version: rocky
mcp_version: nightly
banner_company_name: OPNFV
salt_control_trusty_image: '' # Dummy value, to keep reclass 1.5.2 happy
- salt_control_xenial_image: salt://salt/files/control/images/base_image_opnfv_fuel_vcp.img
+ salt_control_bionic_image: salt://salt/files/control/images/base_image_opnfv_fuel_vcp.img
# VCP VMs spawned on KVM Hosts net ifaces (max 3)
{%- if conf.MCP_JUMP_ARCH == 'aarch64' %}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
index 41d73e322..0ecc2e364 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
@@ -8,7 +8,6 @@
{%- import 'net_map.j2' as nm with context %}
---
classes:
- - system.linux.system.repo.saltstack.xenial
- system.reclass.storage.system.physical_control_cluster
- system.reclass.storage.system.openstack_control_cluster
- system.reclass.storage.system.openstack_proxy_cluster
@@ -36,7 +35,7 @@ parameters:
infra_kvm_node01:
params:
keepalived_vip_priority: 100
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node01_pxe_admin_address}
infra_kvm_node02:
{%- if not conf.MCP_VCP %}
@@ -45,16 +44,16 @@ parameters:
{%- endif %}
params:
keepalived_vip_priority: 101
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node02_pxe_admin_address}
infra_kvm_node03:
params:
keepalived_vip_priority: 102
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node03_pxe_admin_address}
openstack_telemetry_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
# create resources only from 1 controller
# to prevent race conditions
ceilometer_create_gnocchi_resources: true
@@ -62,33 +61,33 @@ parameters:
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node01_pxe_admin_address}
openstack_telemetry_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
redis_cluster_role: 'slave'
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node02_pxe_admin_address}
openstack_telemetry_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
redis_cluster_role: 'slave'
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node03_pxe_admin_address}
openstack_message_queue_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node01_pxe_admin_address}
openstack_message_queue_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node02_pxe_admin_address}
openstack_message_queue_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node03_pxe_admin_address}
openstack_proxy_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_proxy_node01_pxe_admin_address}
openstack_proxy_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_proxy_node02_pxe_admin_address}
# stacklight_log_node01:
# classes:
@@ -101,31 +100,31 @@ parameters:
classes:
- cluster.mcp-common-ha.openstack_control_init
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
# NOTE: When VCP is present, external_address is not used
external_address: ${_param:openstack_proxy_node01_address}
pxe_admin_address: ${_param:opnfv_openstack_control_node01_pxe_admin_address}
openstack_control_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
external_address: 0.0.0.0
pxe_admin_address: ${_param:opnfv_openstack_control_node02_pxe_admin_address}
openstack_control_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
external_address: ${_param:openstack_proxy_node02_address}
pxe_admin_address: ${_param:opnfv_openstack_control_node03_pxe_admin_address}
openstack_database_node01:
classes:
- cluster.mcp-common-ha.openstack_database_init
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node01_pxe_admin_address}
openstack_database_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node02_pxe_admin_address}
openstack_database_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node03_pxe_admin_address}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
index 822e8bfb7..fe337fa5b 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
@@ -28,7 +28,7 @@ classes:
- cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
cluster_vip_address: ${_param:infra_kvm_address}
cluster_node01_address: ${_param:infra_kvm_node01_address}
cluster_node02_address: ${_param:infra_kvm_node02_address}
@@ -53,6 +53,7 @@ parameters:
unix_sock_group: libvirt
salt:
control:
+ virt_service: libvirtd
size: # RAM 4096,8192,16384,32768,65536
# Default production sizing
openstack.control:
@@ -98,8 +99,8 @@ parameters:
cluster:
internal:
node:
- mdb01: &salt_control_xenial_image_common_attr
- image: ${_param:salt_control_xenial_image}
+ mdb01: &salt_control_bionic_image_common_attr
+ image: ${_param:salt_control_bionic_image}
{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
seed: qemu-nbd
~cloud_init: ~
@@ -111,31 +112,31 @@ parameters:
path: /usr/share/AAVMF/AAVMF_CODE.fd
{%- endif %}
mdb02:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
mdb03:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
ctl01:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
ctl02:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
ctl03:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
dbs01:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
dbs02:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
dbs03:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
msg01:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
msg02:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
msg03:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
prx01:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
prx02:
- <<: *salt_control_xenial_image_common_attr
+ <<: *salt_control_bionic_image_common_attr
provider: kvm03.${_param:cluster_domain}
virt:
nic:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
index 85e69b666..70a2ba0c9 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
@@ -34,7 +34,7 @@ parameters:
nova_vncproxy_url: https://${_param:cluster_public_host}:6080
keepalived_vip_interface: br-ctl
keepalived_vip_virtual_router_id: 69
- linux_system_codename: xenial
+ linux_system_codename: bionic
glusterfs:
client:
volumes:
@@ -54,6 +54,11 @@ parameters:
connection_recycle_time: ${_param:db_connection_recycle_time}
barbican:
enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - cinder-volume
+ openiscsi_services:
+ - tgt
+ - iscsid
linux:
storage:
lvm:
@@ -86,3 +91,8 @@ parameters:
enabled: ${_param:barbican_integration_enabled}
image:
verify_glance_signatures: false
+ pkgs:
+ - nova-compute
+ - python3-novaclient
+ - pm-utils
+ - sysfsutils
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
index fe844e7c1..b3ab9e2c7 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
@@ -52,7 +52,7 @@ classes:
parameters:
_param:
{%- if not conf.MCP_VCP %}
- linux_system_codename: xenial # sync from kvm
+ linux_system_codename: bionic # sync from kvm
# For NOVCP, we switch keepalived VIPs, to keep cluster_vip_address in ctl
single_nic: br-ctl # for keepalive_vip_interface interpolation
control_nic: ~ # Dummy value to keep reclass 1.5.2 happy
@@ -73,14 +73,26 @@ parameters:
barbican_integration_enabled: 'false'
fernet_rotation_driver: 'shared_filesystem'
credential_rotation_driver: 'shared_filesystem'
+ common_conn_recycle_time: &db_conn_recycle_time
+ database:
+ connection_recycle_time: ${_param:db_connection_recycle_time}
nova:
- controller: &db_conn_recycle_time
- database:
- connection_recycle_time: ${_param:db_connection_recycle_time}
+ controller:
+ <<: *db_conn_recycle_time
barbican:
enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - nova-api
+ - nova-conductor
+ - nova-consoleauth
+ - nova-scheduler
+ - nova-novncproxy
+ - python3-novaclient
cinder:
controller:
+ pkgs:
+ - cinder-api
+ - cinder-scheduler
<<: *db_conn_recycle_time
neutron:
server:
@@ -91,17 +103,27 @@ parameters:
global_physnet_mtu: ${_param:interface_mtu}
backend:
external_mtu: ${_param:interface_mtu}
+ pkgs:
+ - neutron-server
keystone:
server:
<<: *db_conn_recycle_time
cacert: /etc/ssl/certs/mcp_os_cacert
openrc_extra:
volume_device_name: sdc
+ pkgs:
+ - keystone
+ - python3-memcache
+ - python3-openstackclient
glance:
server:
<<: *db_conn_recycle_time
identity:
barbican_endpoint: ${barbican:server:host_href}
+ pkgs:
+ - glance
+ services:
+ - glance-api
{%- if conf.MCP_VCP %}
heat:
server:
@@ -118,6 +140,9 @@ parameters:
host: ${_param:openstack_proxy_control_address}
port: 8003
protocol: http
+ apache:
+ server:
+ mod_wsgi: libapache2-mod-wsgi-py3
{%- else %}
libvirt:
server:
@@ -135,6 +160,7 @@ parameters:
server:
bind:
listen_default_ports: false
+ mod_wsgi: libapache2-mod-wsgi-py3
# sync from common-ha kvm role
glusterfs:
server:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
index c3f90bca4..bdc23ff6a 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
@@ -144,6 +144,7 @@ parameters:
aodh_version: ${_param:openstack_version}
barbican_version: ${_param:openstack_version}
barbican_service_host: ${_param:openstack_control_address}
+ apache_barbican_api_address: ${_param:single_address}
barbican_integration_enabled: true
horizon_version: ${_param:openstack_version}
horizon_identity_host: ${_param:openstack_control_address}
@@ -200,11 +201,11 @@ parameters:
{%- if 'aarch64' in nm.cluster.arch %}
repo:
armband_3: # Should be in sync with the repo config generated via curtin/MaaS
- source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/${_param:linux_system_codename} ${_param:armband_repo_version}-armband main"
+ source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial ${_param:armband_repo_version}-armband main"
key: ${_param:armband_key}
pin:
- pin: 'release a=${_param:armband_repo_version}-armband'
- priority: 1201
+ priority: 15
package: '*'
{%- endif %}
kernel:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
index cef23c9ee..31bfeddb4 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
@@ -95,5 +95,6 @@ parameters:
args: 'nginx'
apache:
server:
+ mod_wsgi: libapache2-mod-wsgi-py3
bind:
listen_default_ports: false
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
index 0ec9d25db..776e520d2 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
@@ -56,6 +56,7 @@ parameters:
cluster:
enabled: true
mode: sentinel
+ password: ${_param:opnfv_main_password}
role: ${_param:redis_cluster_role}
quorum: 2
master:
@@ -74,8 +75,7 @@ parameters:
- python-memcache
apache:
server:
- bind:
- listen_default_ports: false
+ mod_wsgi: libapache2-mod-wsgi-py3
~modules:
- rewrite
{%- if conf.MCP_VCP %} {#- wsgi module will be enabled by a different class inherited later #}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
index de54136a6..8fe37f9ef 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
@@ -84,11 +84,11 @@ parameters:
{%- if 'aarch64' in nm.cluster.arch %}
repo:
armband_3: # Should be in sync with the repo config generated via curtin/MaaS
- source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/${_param:linux_system_codename} ${_param:armband_repo_version}-armband main"
+ source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial ${_param:armband_repo_version}-armband main"
key: ${_param:armband_key}
pin:
- pin: 'release a=${_param:armband_repo_version}-armband'
- priority: 1201
+ priority: 15
package: '*'
{%- endif %}
network:
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2
index 7b8709c7d..95b39f637 100644
--- a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2
@@ -19,7 +19,7 @@ classes:
- cluster.mcp-iec-noha.infra
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
linux:
network:
interface:
@@ -87,7 +87,7 @@ parameters:
name: ${_param:akraino_iec_node0{{ i }}_hostname}
provider: ${_param:infra_kvm_node0{{ i }}_hostname}.${_param:cluster_domain}
size: akraino.iec
- image: ${_param:salt_control_xenial_image}
+ image: ${_param:salt_control_bionic_image}
{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
machine: virt
cpu_mode: host-passthrough
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
index ef874cdb5..9ff091941 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
@@ -29,7 +29,7 @@ parameters:
odl0{{ i }}:
name: ${_param:opendaylight_server_node0{{ i }}_hostname}
provider: ${_param:infra_kvm_node0{{ i }}_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
+ image: ${_param:salt_control_bionic_image}
size: opendaylight.server
{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
machine: virt
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
index ff9eff551..3a87ab558 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
@@ -10,8 +10,3 @@
classes:
- cluster.mcp-odl-ha.infra
- cluster.all-mcp-arch-common.infra.maas
-{%- if 'aarch64' not in nm.cluster.arch %}
-parameters:
- _param:
- hwe_kernel: 'ga-16.04'
-{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
index 531e01f92..23d1072d7 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
@@ -14,7 +14,7 @@ classes:
{%- endif %}
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
opendaylight:
server:
odl_bind_ip: ${_param:single_address}
diff --git a/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml
index defa03158..8a17ec1ab 100644
--- a/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-fdio-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
index dbf3a4adf..f0d912e8b 100644
--- a/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-odl-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
index 00fc19eb4..6d4a8bef3 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovn-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
index d51b66da5..9ad516f18 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-dpdk-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
index d5171277d..8f1cc2cd5 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/scripts/pharos b/mcp/scripts/pharos
-Subproject 0a5938bca8b4c6b8f328bdbb1f9da35fd8da002
+Subproject 9b9235ea136928b8148c57b1c3f3ddee6f761e6