-rw-r--r--  deployed-server/deployed-server.yaml                          4
-rw-r--r--  environments/major-upgrade-all-in-one.yaml                    8
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh             13
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh              18
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh                   15
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh    13
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh            15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml                6
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml       13
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh              24
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                              13
-rw-r--r--  extraconfig/tasks/yum_update.yaml                             7
-rwxr-xr-x  network/scripts/run-os-net-config.sh                         10
-rw-r--r--  network/service_net_map.j2.yaml                               1
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml                    1
-rw-r--r--  overcloud.j2.yaml                                            17
-rw-r--r--  puppet/blockstorage-role.yaml                                45
-rw-r--r--  puppet/cephstorage-role.yaml                                 45
-rw-r--r--  puppet/compute-role.yaml                                     45
-rw-r--r--  puppet/controller-role.yaml                                  44
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml                           32
-rw-r--r--  puppet/objectstorage-role.yaml                               45
-rw-r--r--  puppet/role.role.j2.yaml                                     53
-rw-r--r--  puppet/services/ceilometer-base.yaml                         10
-rw-r--r--  puppet/services/horizon.yaml                                  1
-rw-r--r--  puppet/services/neutron-sriov-agent.yaml                      5
-rw-r--r--  puppet/services/panko-base.yaml                               1
-rw-r--r--  puppet/services/rabbitmq.yaml                                 4
28 files changed, 359 insertions(+), 149 deletions(-)
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 690c3f2f..e4f35507 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -49,7 +49,7 @@ resources:
InstanceIdConfig:
type: OS::Heat::StructuredConfig
properties:
- group: os-apply-config
+ group: apply-config
config:
instance-id: {get_resource: deployed-server}
@@ -67,7 +67,7 @@ resources:
#!/bin/bash
set -eux
mkdir -p $heat_outputs_path
- host=$(hostnamectl --transient)
+ host=$(hostname -s)
echo -n $host > $heat_outputs_path.hostname
cat $heat_outputs_path.hostname
outputs:
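
The hostname change above swaps hostnamectl --transient for hostname -s. A quick illustration of the difference, assuming a hypothetical node named overcloud-novacompute-0.localdomain:

    hostnamectl --transient   # transient hostname, may include the domain: overcloud-novacompute-0.localdomain
    hostname -s               # short name only, cut at the first dot:      overcloud-novacompute-0

Only the short name is written to $heat_outputs_path.hostname, which hostname -s returns regardless of how the transient hostname was set.
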
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
new file mode 100644
index 00000000..69d72edd
--- /dev/null
+++ b/environments/major-upgrade-all-in-one.yaml
@@ -0,0 +1,8 @@
+# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps,
+# which means you can do a major upgrade in one pass. This may be useful
+# e.g. for all-in-one deployments where we can upgrade the compute services
+# at the same time as the controlplane.
+# Note that it will be necessary to pass a mapping of OS::Heat::None again for
+# any subsequent updates, or the upgrade steps will run again.
+resource_registry:
+ OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
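
As the note above says, subsequent stack updates should map the upgrade steps back to OS::Heat::None so they do not re-run; a minimal sketch of such a follow-up environment file:

    resource_registry:
      OS::TripleO::UpgradeSteps: OS::Heat::None
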
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
index 39861826..64c4457e 100644
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -5,18 +5,7 @@
set -eu
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index d84cad45..59b7c20e 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -8,7 +8,9 @@ set -o pipefail
UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-cat > $UPGRADE_SCRIPT << 'ENDOFCAT'
+declare -f special_case_ovs_upgrade_if_needed > $UPGRADE_SCRIPT
+# use >> here so we don't lose the declaration we added above
+cat >> $UPGRADE_SCRIPT << 'ENDOFCAT'
#!/bin/bash
### DO NOT MODIFY THIS FILE
### This file is automatically delivered to the ceph-storage nodes as part of the
@@ -49,19 +51,7 @@ timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
sleep 2;
done"
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
# Update (Ceph to Jewel)
yum -y install python-zaqarclient # needed for os-collect-config
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index b0d42806..7a3e1073 100644
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -18,19 +18,8 @@ set -eu
crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 7cc6735f..6bfe1239 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -100,18 +100,7 @@ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 2667bb16..d9d1b4d5 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -23,19 +23,8 @@ function systemctl_swift {
done
}
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
systemctl_swift stop
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index b0418a56..a175a423 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -97,7 +97,11 @@ resources:
depends_on: ControllerPacemakerUpgradeDeployment_Step1
properties:
group: script
- config: {get_file: major_upgrade_block_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_block_storage.sh
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index f6aa3066..8e9cbdb4 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -54,19 +54,28 @@ resources:
upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
params:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_compute.sh
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_object_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_object_storage.sh
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_ceph_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_ceph_storage.sh
{% for role in roles %}
UpgradeInit{{role.name}}Deployment:
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 2c7dfc35..aae4a2de 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -297,3 +297,27 @@ function systemctl_swift {
manage_systemd_service $action $service
done
}
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+function special_case_ovs_upgrade_if_needed {
+ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ rm -rf OVS_UPGRADE
+ mkdir OVS_UPGRADE && pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ for pkg in $(ls -1 *.rpm); do
+ if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
+ echo "Looks like newer version of $pkg is already installed, skipping"
+ else
+ echo "Updating $pkg with nopostun option"
+ rpm -U --replacepkgs --nopostun $pkg
+ fi
+ done
+ popd
+ else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+ fi
+
+}
+
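
The helper above is consumed in two ways: some callers get it by prepending pacemaker_common_functions.sh via list_join, while the compute, object-storage and ceph scripts embed their own copy with declare -f so that a script generated on the node can call it standalone. A minimal sketch of the declare -f mechanism (the target path is illustrative):

    # declare -f prints the function definition, so the generated script
    # carries its own copy of the helper and can call it on its own.
    source pacemaker_common_functions.sh
    cat > /tmp/tripleo_upgrade_node.sh <<EOF
    #!/bin/bash
    $(declare -f special_case_ovs_upgrade_if_needed)
    special_case_ovs_upgrade_if_needed
    EOF
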
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 8a88ee64..74af7b02 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -63,18 +63,7 @@ if [[ "$pacemaker_status" == "active" && \
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml
index d313ca9f..f2de5acf 100644
--- a/extraconfig/tasks/yum_update.yaml
+++ b/extraconfig/tasks/yum_update.yaml
@@ -9,7 +9,12 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: yum_update.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: yum_update.sh
+
inputs:
- name: update_identifier
description: yum will only run for previously unused values of update_identifier
diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh
index fc1e6d54..5df67b78 100755
--- a/network/scripts/run-os-net-config.sh
+++ b/network/scripts/run-os-net-config.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# Note this script expects the following environment variables to be set
-# normally these are provided by the calling SoftwareConfig resource, but
-# they may also be set manually for testing
+# The following environment variables may be set to substitute in a
+# custom bridge or interface name. Normally these are provided by the calling
+# SoftwareConfig resource, but they may also be set manually for testing.
# $bridge_name : The bridge device name to apply
# $interface_name : The interface name to apply
#
@@ -113,8 +113,8 @@ if [ -n '$network_config' ]; then
mkdir -p /etc/os-net-config
# Note these variables come from the calling heat SoftwareConfig
echo '$network_config' > /etc/os-net-config/config.json
- sed -i "s/bridge_name/$bridge_name/" /etc/os-net-config/config.json
- sed -i "s/interface_name/$interface_name/" /etc/os-net-config/config.json
+ sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json
+ sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json
os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
RETVAL=$?
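
The ${bridge_name:-''} form above is ordinary bash default expansion; a short sketch of its behaviour (values illustrative):

    unset bridge_name
    echo "${bridge_name:-''}"    # variable unset or empty: the fallback is substituted
    bridge_name=br-ex
    echo "${bridge_name:-''}"    # variable set: prints br-ex

so the sed substitutions still have a value to insert when the calling SoftwareConfig does not provide one.
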
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index 0cb6571f..5991b3bc 100644
--- a/network/service_net_map.j2.yaml
+++ b/network/service_net_map.j2.yaml
@@ -59,6 +59,7 @@ parameters:
PublicNetwork: external
OpendaylightApiNetwork: internal_api
MistralApiNetwork: internal_api
+ ZaqarApiNetwork: internal_api
# We special-case the default ResolveNetwork for the CephStorage role
# for backwards compatibility, all other roles default to internal_api
CephStorageHostnameResolveNetwork: storage
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 77a48658..ebbeef6e 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -90,6 +90,7 @@ resource_registry:
OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+ OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
# Service to network Mappings
OS::TripleO::ServiceNetMap: network/service_net_map.yaml
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 39a092b1..f7e6f37f 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -477,7 +477,7 @@ resources:
type: OS::TripleO::Network
ControlVirtualIP:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
depends_on: Networks
properties:
name: control_virtual_ip
@@ -587,9 +587,9 @@ resources:
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
- # Post deployment steps for all roles
- AllNodesDeploySteps:
- type: OS::TripleO::PostDeploySteps
+ # Upgrade steps for all roles
+ AllNodesUpgradeSteps:
+ type: OS::TripleO::UpgradeSteps
depends_on:
{% for role in roles %}
- {{role.name}}AllNodesDeployment
@@ -604,10 +604,10 @@ resources:
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
- # Upgrade steps for all roles
- AllNodesUpgradeSteps:
- type: OS::TripleO::UpgradeSteps
- depends_on: AllNodesDeploySteps
+ # Post deployment steps for all roles
+ AllNodesDeploySteps:
+ type: OS::TripleO::PostDeploySteps
+ depends_on: AllNodesUpgradeSteps
properties:
servers:
{% for role in roles %}
@@ -618,7 +618,6 @@ resources:
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
-
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
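
The reordering above inverts the previous dependency: the upgrade steps now hang directly off the per-role AllNodesDeployment resources, and the post-deploy steps wait on the upgrade steps. A sketch of the resulting chain for each role:

    {{role.name}}AllNodesDeployment -> AllNodesUpgradeSteps -> AllNodesDeploySteps
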
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 36587a41..7d1f8d8f 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -71,11 +71,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ BlockStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
BlockStorageSchedulerHints:
type: json
@@ -97,6 +106,12 @@ parameters:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
BlockStorage:
@@ -118,7 +133,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: BlockStorageServerMetadata}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -315,9 +333,30 @@ resources:
server: {get_resource: BlockStorage}
actions: {get_param: NetworkDeploymentActions}
+ BlockStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ BlockStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: BlockStorageUpgradeInitDeployment
+ server: {get_resource: BlockStorage}
+ config: {get_resource: BlockStorageUpgradeInitConfig}
+
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: BlockStorageUpgradeInitDeployment
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
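
The new role-specific metadata parameter is map_merged with the global one; a hypothetical parameter_defaults showing how the two combine (keys and values illustrative):

    parameter_defaults:
      ServerMetadata:
        owner: cloud-team
      BlockStorageServerMetadata:
        tier: block-storage

With the map_merge above, BlockStorage nodes receive both keys in their Nova metadata, and a key present in both maps takes the role-specific value because it is merged last. The same pattern is repeated in the other role templates below.
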
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 558f97d8..2c46bf1a 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -77,11 +77,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ CephStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
CephStorageSchedulerHints:
type: json
@@ -103,6 +112,12 @@ parameters:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
CephStorage:
@@ -124,7 +139,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: CephStorageServerMetadata}
scheduler_hints: {get_param: CephStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -321,9 +339,30 @@ resources:
server: {get_resource: CephStorage}
actions: {get_param: NetworkDeploymentActions}
+ CephStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ CephStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: CephStorageUpgradeInitDeployment
+ server: {get_resource: CephStorage}
+ config: {get_resource: CephStorageUpgradeInitConfig}
+
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: CephStorageUpgradeInitDeployment
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 818f18c8..0a2598c1 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -92,11 +92,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ NovaComputeServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
NovaComputeSchedulerHints:
type: json
@@ -115,6 +124,12 @@ parameters:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -138,7 +153,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: NovaComputeServerMetadata}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -338,6 +356,27 @@ resources:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
+ NovaComputeUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ NovaComputeUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: NovaComputeUpgradeInitDeployment
+ server: {get_resource: NovaCompute}
+ config: {get_resource: NovaComputeUpgradeInitConfig}
+
NovaComputeConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -383,7 +422,7 @@ resources:
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: NovaComputeUpgradeInitDeployment
properties:
name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 2781daa0..5e03adcd 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -106,11 +106,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ ControllerServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ControllerSchedulerHints:
type: json
@@ -129,6 +138,12 @@ parameters:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
parameter_groups:
- label: deprecated
@@ -157,7 +172,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: ControllerServerMetadata}
scheduler_hints: {get_param: ControllerSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -372,10 +390,30 @@ resources:
server: {get_resource: Controller}
NodeIndex: {get_param: NodeIndex}
+ ControllerUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ ControllerUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: ControllerUpgradeInitDeployment
+ server: {get_resource: Controller}
+ config: {get_resource: ControllerUpgradeInitConfig}
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
config: {get_resource: ControllerConfig}
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index f8dad433..8d954c09 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -15,36 +15,8 @@ parameters:
Setting to a previously unused value during stack-update will trigger
the Upgrade resources to re-run on all roles.
- UpgradeInitCommand:
- type: string
- description: |
- Command or script snippet to run on all overcloud nodes to
- initialize the upgrade process. E.g. a repository switch.
- default: ''
-
resources:
- # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004
- UpgradeInitConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\n"
- - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- - get_param: UpgradeInitCommand
-
-{% for role in roles %}
- {{role.name}}Upgrade_Init:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- name: {{role.name}}Upgrade_Init
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: UpgradeInitConfig}
-{% endfor %}
-
# Upgrade Steps for all roles
# FIXME(shardy): would be nice to make the number of steps configurable
{% for step in range(1, 8) %}
@@ -56,10 +28,8 @@ resources:
# serialization, but the event output is easier to follow if we
# do, and there should be minimal performance hit (creating the
# config is cheap compared to the time to apply the deployment).
+ {% if step > 1 %}
depends_on:
- {% if step == 1 %}
- - {{role.name}}Upgrade_Init
- {% else %}
{% for dep in roles %}
- {{dep.name}}Upgrade_Step{{step -1}}
{% endfor %}
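
Because the per-role Upgrade_Init resources now live in the role templates (the UpgradeInitDeployment resources added above), step 1 no longer needs a depends_on at all. A sketch of what the loop renders for step 2 in a hypothetical two-role deployment:

    ControllerUpgrade_Step2:
      depends_on:
        - ControllerUpgrade_Step1
        - ComputeUpgrade_Step1
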
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 2c76492a..088a2e3d 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -71,11 +71,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ SwiftStorageServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
ObjectStorageSchedulerHints:
type: json
@@ -97,6 +106,12 @@ parameters:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -118,7 +133,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: SwiftStorageServerMetadata}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -315,6 +333,27 @@ resources:
server: {get_resource: SwiftStorage}
actions: {get_param: NetworkDeploymentActions}
+ SwiftStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ SwiftStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: SwiftStorageUpgradeInitDeployment
+ server: {get_resource: SwiftStorage}
+ config: {get_resource: SwiftStorageUpgradeInitConfig}
+
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -354,7 +393,7 @@ resources:
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: SwiftStorageUpgradeInitDeployment
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 9726d978..acb7677a 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -28,6 +28,10 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -83,11 +87,20 @@ parameters:
description: >
The DNS domain used for the hosts. This should match the dhcp_domain
configured in the Undercloud neutron. Defaults to localdomain.
+ {{role}}ServerMetadata:
+ default: {}
+ description: >
+ Extra properties or metadata passed to Nova for the created nodes in
+ the overcloud. It's accessible via the Nova metadata API. This option is
+ role-specific and is merged with the values given to the ServerMetadata
+ parameter.
+ type: json
ServerMetadata:
default: {}
description: >
Extra properties or metadata passed to Nova for the created nodes in
- the overcloud. It's accessible via the Nova metadata API.
+ the overcloud. It's accessible via the Nova metadata API. This applies to
+ all roles and is merged with a role-specific metadata parameter.
type: json
{{role}}SchedulerHints:
type: json
@@ -115,6 +128,13 @@ parameters:
LoggingGroups:
type: comma_delimited_list
default: []
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+
resources:
{{role}}:
@@ -136,7 +156,10 @@ resources:
template: {get_param: Hostname}
params: {get_param: HostnameMap}
software_config_transport: {get_param: SoftwareConfigTransport}
- metadata: {get_param: ServerMetadata}
+ metadata:
+ map_merge:
+ - {get_param: ServerMetadata}
+ - {get_param: {{role}}ServerMetadata}
scheduler_hints: {get_param: {{role}}SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -332,10 +355,34 @@ resources:
config: {get_resource: NetworkConfig}
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
+ input_values:
+ bridge_name: br-ex
+ interface_name: {get_param: NeutronPublicInterface}
+
+ {{role}}UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ {{role}}UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: {{role}}UpgradeInitDeployment
+ server: {get_resource: {{role}}}
+ config: {get_resource: {{role}}UpgradeInitConfig}
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: {{role}}UpgradeInitDeployment
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
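
UpgradeInitCommand defaults to an empty string, so by default the generated UpgradeInit script only removes a stale /etc/resolv.conf.save. A hypothetical parameter_defaults showing the repository-switch use case the description mentions (repository names illustrative):

    parameter_defaults:
      UpgradeInitCommand: |
        set -e
        yum-config-manager --disable 'old-release-*'
        yum-config-manager --enable new-release
        yum clean all
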
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 060ae32d..24c71cbb 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -50,6 +50,14 @@ parameters:
default: false
description: Whether to store events in ceilometer.
type: boolean
+ EnableLegacyCeilometerApi:
+ default: false
+ description: Enable legacy ceilometer Api service if needed.
+ type: boolean
+ EventPipelinePublishers:
+ default: ['notifier://?topic=alarm.all']
+ description: A list of publishers to put in event_pipeline.yaml.
+ type: comma_delimited_list
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -93,6 +101,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ceilometer'
+ enable_legacy_ceilometer_api: {get_param: EnableLegacyCeilometerApi}
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer::metering_secret: {get_param: CeilometerMeteringSecret}
# we include db_sync class in puppet-tripleo
@@ -104,6 +113,7 @@ outputs:
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
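
Both new ceilometer parameters are plain overridables; a hypothetical environment enabling the legacy API and stating the default publisher list explicitly:

    parameter_defaults:
      EnableLegacyCeilometerApi: true
      EventPipelinePublishers:
        - notifier://?topic=alarm.all
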
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 8eaf4044..e59dc202 100644
--- a/puppet/services/horizon.yaml
+++ b/puppet/services/horizon.yaml
@@ -58,6 +58,7 @@ outputs:
dport:
- 80
- 443
+ horizon::enable_secure_proxy_ssl_header: true
horizon::disable_password_reveal: true
horizon::enforce_password_check: true
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
diff --git a/puppet/services/neutron-sriov-agent.yaml b/puppet/services/neutron-sriov-agent.yaml
index 44f7f242..0ab066d7 100644
--- a/puppet/services/neutron-sriov-agent.yaml
+++ b/puppet/services/neutron-sriov-agent.yaml
@@ -25,6 +25,7 @@ parameters:
All physical networks listed in network_vlan_ranges
on the server should have mappings to appropriate
interfaces on each agent.
+ Example "tenant0:ens2f0,tenant1:ens2f1"
type: comma_delimited_list
default: ""
NeutronExcludeDevices:
@@ -40,8 +41,8 @@ parameters:
NeutronSriovNumVFs:
description: >
Provide the list of VFs to be reserved for each SR-IOV interface.
- Format "<interface_name1>:<numvfs1>","<interface_name2>:<numvfs2>"
- Example "eth1:4096","eth2:128"
+ Format "<interface_name1>:<numvfs1>,<interface_name2>:<numvfs2>"
+ Example "eth1:4096,eth2:128"
type: comma_delimited_list
default: ""
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 32754a55..af9c5353 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -37,7 +37,6 @@ outputs:
value:
service_name: panko_base
config_settings:
- panko_redis_password: {get_param: RedisPassword}
panko::db::database_connection:
list_join:
- ''
diff --git a/puppet/services/rabbitmq.yaml b/puppet/services/rabbitmq.yaml
index b77e0a91..08f3f6bc 100644
--- a/puppet/services/rabbitmq.yaml
+++ b/puppet/services/rabbitmq.yaml
@@ -73,6 +73,8 @@ outputs:
rabbitmq::repos_ensure: false
rabbitmq::tcp_keepalive: true
rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
RABBITMQ_NODENAME: "rabbit@%{::hostname}"
RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
rabbitmq_kernel_variables:
@@ -95,7 +97,7 @@ outputs:
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
step_config: |
include ::tripleo::profile::base::rabbitmq