-rw-r--r--  ci/environments/scenario002-multinode.yaml                 |  1
-rw-r--r--  ci/environments/scenario003-multinode.yaml                 |  1
-rw-r--r--  ci/environments/scenario004-multinode.yaml                 |  1
-rw-r--r--  deployed-server/ctlplane-port.yaml                         | 23
-rw-r--r--  deployed-server/deployed-neutron-port.yaml                 | 67
-rw-r--r--  deployed-server/deployed-server-config.yaml                | 22
-rw-r--r--  deployed-server/deployed-server.yaml                       | 28
-rwxr-xr-x  deployed-server/scripts/get-occ-config.sh                  | 13
-rw-r--r--  environments/deployed-server-environment.yaml              |  3
-rw-r--r--  environments/deployed-server-noop-ctlplane.yaml            |  3
-rw-r--r--  environments/major-upgrade-all-in-one.yaml                 |  8
-rw-r--r--  environments/tls-endpoints-public-dns.yaml                 |  6
-rw-r--r--  environments/tls-endpoints-public-ip.yaml                  |  6
-rw-r--r--  environments/tls-everywhere-endpoints-dns.yaml             |  6
-rw-r--r--  extraconfig/tasks/major_upgrade_block_storage.sh           | 13
-rw-r--r--  extraconfig/tasks/major_upgrade_ceph_storage.sh            | 20
-rw-r--r--  extraconfig/tasks/major_upgrade_compute.sh                 | 15
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh  | 13
-rw-r--r--  extraconfig/tasks/major_upgrade_object_storage.sh          | 15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml             |  6
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml     | 13
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh            | 24
-rwxr-xr-x  extraconfig/tasks/yum_update.sh                            | 13
-rw-r--r--  extraconfig/tasks/yum_update.yaml                          |  7
-rw-r--r--  network/endpoints/endpoint_map.yaml                        |  6
-rw-r--r--  network/ports/net_ip_list_map.yaml                         | 17
-rwxr-xr-x  network/scripts/run-os-net-config.sh                       | 10
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml                 |  2
-rw-r--r--  overcloud.j2.yaml                                          | 24
-rw-r--r--  puppet/all-nodes-config.yaml                               |  3
-rw-r--r--  puppet/blockstorage-role.yaml                              | 33
-rw-r--r--  puppet/cephstorage-role.yaml                               | 33
-rw-r--r--  puppet/compute-role.yaml                                   | 33
-rw-r--r--  puppet/controller-role.yaml                                | 32
-rw-r--r--  puppet/extraconfig/tls/freeipa-enroll.yaml                 | 21
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml                         | 32
-rw-r--r--  puppet/objectstorage-role.yaml                             | 33
-rw-r--r--  puppet/role.role.j2.yaml                                   | 41
-rw-r--r--  puppet/services/README.rst                                 | 14
-rw-r--r--  puppet/services/ceilometer-base.yaml                       | 10
-rw-r--r--  puppet/services/opendaylight-ovs.yaml                      | 11
-rw-r--r--  puppet/services/panko-base.yaml                            |  1
-rw-r--r--  puppet/services/services.yaml                              |  6
-rw-r--r--  puppet/services/swift-proxy.yaml                           | 64
44 files changed, 512 insertions(+), 241 deletions(-)
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index 97fec24c..7875ef4e 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -49,3 +49,4 @@ parameter_defaults:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 092426cb..26f94d03 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -50,3 +50,4 @@ parameter_defaults:
# we don't deploy Swift so we switch to file backend.
GlanceBackend: 'file'
KeystoneTokenProvider: 'fernet'
+ SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 4aa18709..0d94cea0 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -60,3 +60,4 @@ parameter_defaults:
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ SwiftCeilometerPipelineEnabled: false
diff --git a/deployed-server/ctlplane-port.yaml b/deployed-server/ctlplane-port.yaml
deleted file mode 100644
index eb10fba0..00000000
--- a/deployed-server/ctlplane-port.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-heat_template_version: 2014-10-16
-
-parameters:
- Hostname:
- type: string
-
-resources:
-
- ControlPlanePort:
- type: OS::Neutron::Port
- properties:
- network: ctlplane
- name:
- list_join:
- - '-'
- - - {get_param: Hostname}
- - ctlplane
- - port
- replacement_policy: AUTO
-
-outputs:
- ip_address:
- value: {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
diff --git a/deployed-server/deployed-neutron-port.yaml b/deployed-server/deployed-neutron-port.yaml
new file mode 100644
index 00000000..7855be89
--- /dev/null
+++ b/deployed-server/deployed-neutron-port.yaml
@@ -0,0 +1,67 @@
+heat_template_version: 2016-10-14
+
+description: "
+ A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
+ the input from the DeployedServerPortMap (set via parameter_defaults). This
+ lookup requires the use of port naming conventions. In order for this to
+ work with deployed-server, the keys should be <hostname>-<network>.
+ Example:
+ parameter_defaults:
+ DeployedServerPortMap:
+ gatsby-ctlplane:
+ fixed_ips:
+ - ip_address: 127.0.0.1
+ subnets:
+ - cidr: 24"
+
+parameters:
+ name:
+ default: ''
+ type: string
+ network:
+ default: ''
+ type: string
+ fixed_ips:
+ default: ''
+ type: comma_delimited_list
+ replacement_policy:
+ default: ''
+ type: string
+ DeployedServerPortMap:
+ default: {}
+ type: json
+
+
+outputs:
+ fixed_ips:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, fixed_ips]}
+ subnets:
+ value:
+ {get_param: [DeployedServerPortMap, {get_param: name}, subnets]}
+ name:
+ value: {get_param: name}
+ status:
+ value: DOWN
+ allowed_address_pairs:
+ value: {}
+ device_id:
+ value: ''
+ device_owner:
+ value: {get_param: network}
+ dns_assignment:
+ value: ''
+ port_security_enabled:
+ value: False
+ admin_state_up:
+ value: False
+ security_groups:
+ value: {}
+ network_id:
+ value: ''
+ tenant_id:
+ value: ''
+ qos_policy_id:
+ value: ''
+ mac_address:
+ value: ''
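The fixed_ips and subnets outputs above are simply looked up from DeployedServerPortMap using the port name as the key. As a hedged illustration (the hostname "gatsby" and all addresses are placeholders), combining the updated deployed-server-noop-ctlplane.yaml registry mapping with a port map in an environment file could look like this:

resource_registry:
  OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml

parameter_defaults:
  DeployedServerPortMap:
    # <hostname>-<network>, matching the port name built in deployed-server.yaml
    gatsby-ctlplane:
      fixed_ips:
        - ip_address: 192.168.24.10
      subnets:
        - cidr: 24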
diff --git a/deployed-server/deployed-server-config.yaml b/deployed-server/deployed-server-config.yaml
deleted file mode 100644
index 8c59dc72..00000000
--- a/deployed-server/deployed-server-config.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-heat_template_version: 2014-10-16
-parameters:
- user_data_format:
- type: string
- default: SOFTWARE_CONFIG
-
-resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
- deployed-server-config:
- type: OS::Heat::SoftwareConfig
-
-outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
- user_data_format:
- value: SOFTWARE_CONFIG
- OS::stack_id:
- value: {get_resource: deployed-server-config}
-
-
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 73b71588..e4f35507 100644
--- a/deployed-server/deployed-server.yaml
+++ b/deployed-server/deployed-server.yaml
@@ -21,7 +21,7 @@ parameters:
default: ''
name:
type: string
- default: ''
+ default: 'deployed-server'
image_update_policy:
type: string
default: ''
@@ -40,20 +40,18 @@ parameters:
default: {}
resources:
- # We just need something which returns a unique ID, but we can't
- # use RandomString because RefId returns the value, not the physical
- # resource ID, SoftwareConfig should work as it returns a UUID
deployed-server:
- type: OS::TripleO::DeployedServerConfig
+ type: OS::Heat::DeployedServer
properties:
- user_data_format: SOFTWARE_CONFIG
+ name: {get_param: name}
+ software_config_transport: {get_param: software_config_transport}
InstanceIdConfig:
type: OS::Heat::StructuredConfig
properties:
group: apply-config
config:
- instance-id: {get_attr: [deployed-server, "OS::stack_id"]}
+ instance-id: {get_resource: deployed-server}
InstanceIdDeployment:
type: OS::Heat::StructuredDeployment
@@ -82,19 +80,23 @@ resources:
config: {get_resource: HostsEntryConfig}
server: {get_resource: deployed-server}
- ControlPlanePort:
+ ControlPlanePortImpl:
type: OS::TripleO::DeployedServer::ControlPlanePort
properties:
- Hostname: {get_attr: [HostsEntryDeployment, hostname]}
+ network: ctlplane
+ name:
+ list_join:
+ - '-'
+ - - {get_attr: [HostsEntryDeployment, hostname]}
+ - ctlplane
+ replacement_policy: AUTO
outputs:
- # FIXME(shardy) this is needed because TemplateResource returns an
- # ARN not a UUID, which overflows the Deployment server_id column..
OS::stack_id:
- value: {get_attr: [deployed-server, "OS::stack_id"]}
+ value: {get_resource: deployed-server}
networks:
value:
ctlplane:
- - {get_attr: [ControlPlanePort, ip_address]}
+ - {get_attr: [ControlPlanePortImpl, fixed_ips, 0, ip_address]}
name:
value: {get_attr: [HostsEntryDeployment, hostname]}
diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh
index c3ce7183..404244b1 100755
--- a/deployed-server/scripts/get-occ-config.sh
+++ b/deployed-server/scripts/get-occ-config.sh
@@ -79,24 +79,19 @@ for role in $OVERCLOUD_ROLES; do
server_stack=$(openstack stack resource show $stack $server_resource_name -c physical_resource_id -f value)
done
- deployed_server_stack=$(openstack stack resource show $server_stack deployed-server -c physical_resource_id -f value)
+ deployed_server_metadata_url=$(openstack stack resource metadata $server_stack deployed-server | jq -r '.["os-collect-config"].request.metadata_url')
echo "======================"
echo "$role$i os-collect-config.conf configuration:"
config="
[DEFAULT]
-collectors=heat
+collectors=request
command=os-refresh-config
polling_interval=30
-[heat]
-user_id=$admin_user_id
-password=$OS_PASSWORD
-auth_url=$OS_AUTH_URL
-project_id=$admin_project_id
-stack_id=$deployed_server_stack
-resource_name=deployed-server-config"
+[request]
+metadata_url=$deployed_server_metadata_url"
echo "$config"
echo "======================"
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
index c63d399a..7a6639f9 100644
--- a/environments/deployed-server-environment.yaml
+++ b/environments/deployed-server-environment.yaml
@@ -1,4 +1,3 @@
resource_registry:
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/ctlplane-port.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
diff --git a/environments/deployed-server-noop-ctlplane.yaml b/environments/deployed-server-noop-ctlplane.yaml
index cfda314d..54f5e41d 100644
--- a/environments/deployed-server-noop-ctlplane.yaml
+++ b/environments/deployed-server-noop-ctlplane.yaml
@@ -1,4 +1,3 @@
resource_registry:
OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServerConfig: ../deployed-server/deployed-server-config.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Heat::None
+ OS::TripleO::DeployedServer::ControlPlanePort: ../deployed-server/deployed-neutron-port.yaml
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
new file mode 100644
index 00000000..69d72edd
--- /dev/null
+++ b/environments/major-upgrade-all-in-one.yaml
@@ -0,0 +1,8 @@
+# We run the upgrade steps without disabling OS::TripleO::PostDeploySteps,
+# which means you can do a major upgrade in one pass. This may be useful,
+# e.g. for all-in-one deployments, where we can upgrade the compute services
+# at the same time as the control plane.
+# Note that it will be necessary to pass a mapping of OS::Heat::None again for
+# any subsequent updates, or the upgrade steps will run again.
+resource_registry:
+ OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
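As the note above states, once the one-pass upgrade has run, subsequent stack updates should map the upgrade steps back to OS::Heat::None so they are not executed again. A minimal sketch of such a follow-up environment (the file name is hypothetical):

# e.g. environments/noop-upgrade-steps.yaml (hypothetical name)
resource_registry:
  OS::TripleO::UpgradeSteps: OS::Heat::None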
diff --git a/environments/tls-endpoints-public-dns.yaml b/environments/tls-endpoints-public-dns.yaml
index e91c7bc3..fb66b38a 100644
--- a/environments/tls-endpoints-public-dns.yaml
+++ b/environments/tls-endpoints-public-dns.yaml
@@ -65,6 +65,6 @@ parameter_defaults:
ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
- ZaqarWebSocketAdmin: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketInternal: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/environments/tls-endpoints-public-ip.yaml b/environments/tls-endpoints-public-ip.yaml
index c9096f44..6586a547 100644
--- a/environments/tls-endpoints-public-ip.yaml
+++ b/environments/tls-endpoints-public-ip.yaml
@@ -65,6 +65,6 @@ parameter_defaults:
ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
- ZaqarWebSocketAdmin: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketInternal: {protocol: 'http', port: '9000', host: 'IP_ADDRESS'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index 365b0a54..ebb491f0 100644
--- a/environments/tls-everywhere-endpoints-dns.yaml
+++ b/environments/tls-everywhere-endpoints-dns.yaml
@@ -65,6 +65,6 @@ parameter_defaults:
ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
- ZaqarWebSocketAdmin: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
- ZaqarWebSocketInternal: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
- ZaqarWebSocketPublic: {protocol: 'https', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
index 39861826..64c4457e 100644
--- a/extraconfig/tasks/major_upgrade_block_storage.sh
+++ b/extraconfig/tasks/major_upgrade_block_storage.sh
@@ -5,18 +5,7 @@
set -eu
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
index d84cad45..a745e723 100644
--- a/extraconfig/tasks/major_upgrade_ceph_storage.sh
+++ b/extraconfig/tasks/major_upgrade_ceph_storage.sh
@@ -8,7 +8,9 @@ set -o pipefail
UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
-cat > $UPGRADE_SCRIPT << 'ENDOFCAT'
+declare -f special_case_ovs_upgrade_if_needed > $UPGRADE_SCRIPT
+# use >> here so we don't lose the declaration we added above
+cat >> $UPGRADE_SCRIPT << 'ENDOFCAT'
#!/bin/bash
### DO NOT MODIFY THIS FILE
### This file is automatically delivered to the ceph-storage nodes as part of the
@@ -49,19 +51,7 @@ timeout 60 bash -c "while kill -0 ${OSD_PIDS} 2> /dev/null; do
sleep 2;
done"
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
# Update (Ceph to Jewel)
yum -y install python-zaqarclient # needed for os-collect-config
@@ -86,7 +76,7 @@ elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
# If on ext4, we need to enforce lower values for name and namespace len
# or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187
for OSD_ID in $OSD_IDS; do
- OSD_FS=$(findmnt -n -o FSTYPE -T /var/lib/ceph/osd/ceph-${OSD_ID})
+ OSD_FS=$(df -l --output=fstype /var/lib/ceph/osd/ceph-${OSD_ID} | tail -n +2)
if [ ${OSD_FS} = ext4 ]; then
crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256
crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64
diff --git a/extraconfig/tasks/major_upgrade_compute.sh b/extraconfig/tasks/major_upgrade_compute.sh
index b0d42806..7a3e1073 100644
--- a/extraconfig/tasks/major_upgrade_compute.sh
+++ b/extraconfig/tasks/major_upgrade_compute.sh
@@ -18,19 +18,8 @@ set -eu
crudini --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y update
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
index 7cc6735f..6bfe1239 100755
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
@@ -100,18 +100,7 @@ if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
yum -y install python-zaqarclient # needed for os-collect-config
yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_object_storage.sh b/extraconfig/tasks/major_upgrade_object_storage.sh
index 2667bb16..d9d1b4d5 100644
--- a/extraconfig/tasks/major_upgrade_object_storage.sh
+++ b/extraconfig/tasks/major_upgrade_object_storage.sh
@@ -23,19 +23,8 @@ function systemctl_swift {
done
}
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n \$(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+$(declare -f special_case_ovs_upgrade_if_needed)
+special_case_ovs_upgrade_if_needed
systemctl_swift stop
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
index b0418a56..a175a423 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker.yaml
@@ -97,7 +97,11 @@ resources:
depends_on: ControllerPacemakerUpgradeDeployment_Step1
properties:
group: script
- config: {get_file: major_upgrade_block_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_block_storage.sh
BlockStorageUpgradeDeployment:
type: OS::Heat::SoftwareDeploymentGroup
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
index f6aa3066..8e9cbdb4 100644
--- a/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
+++ b/extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml
@@ -54,19 +54,28 @@ resources:
upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
params:
UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+ - get_file: pacemaker_common_functions.sh
- get_file: major_upgrade_compute.sh
ObjectStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_object_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_object_storage.sh
CephStorageDeliverUpgradeScriptConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: major_upgrade_ceph_storage.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: major_upgrade_ceph_storage.sh
{% for role in roles %}
UpgradeInit{{role.name}}Deployment:
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index 2c7dfc35..aae4a2de 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -297,3 +297,27 @@ function systemctl_swift {
manage_systemd_service $action $service
done
}
+
+# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+function special_case_ovs_upgrade_if_needed {
+ if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
+ echo "Manual upgrade of openvswitch - restart in postun detected"
+ rm -rf OVS_UPGRADE
+ mkdir OVS_UPGRADE && pushd OVS_UPGRADE
+ echo "Attempting to downloading latest openvswitch with yumdownloader"
+ yumdownloader --resolve openvswitch
+ for pkg in $(ls -1 *.rpm); do
+ if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
+ echo "Looks like newer version of $pkg is already installed, skipping"
+ else
+ echo "Updating $pkg with nopostun option"
+ rpm -U --replacepkgs --nopostun $pkg
+ fi
+ done
+ popd
+ else
+ echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
+ fi
+
+}
+
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index 8a88ee64..74af7b02 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -63,18 +63,7 @@ if [[ "$pacemaker_status" == "active" && \
fi
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
- mkdir OVS_UPGRADE || true
- pushd OVS_UPGRADE
- echo "Attempting to downloading latest openvswitch with yumdownloader"
- yumdownloader --resolve openvswitch
- echo "Updating openvswitch with nopostun option"
- rpm -U --replacepkgs --nopostun ./*.rpm
- popd
-else
- echo "Skipping manual upgrade of openvswitch - no restart in postun detected"
-fi
+special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
diff --git a/extraconfig/tasks/yum_update.yaml b/extraconfig/tasks/yum_update.yaml
index d313ca9f..f2de5acf 100644
--- a/extraconfig/tasks/yum_update.yaml
+++ b/extraconfig/tasks/yum_update.yaml
@@ -9,7 +9,12 @@ resources:
type: OS::Heat::SoftwareConfig
properties:
group: script
- config: {get_file: yum_update.sh}
+ config:
+ list_join:
+ - ''
+ - - get_file: pacemaker_common_functions.sh
+ - get_file: yum_update.sh
+
inputs:
- name: update_identifier
description: yum will only run for previously unused values of update_identifier
diff --git a/network/endpoints/endpoint_map.yaml b/network/endpoints/endpoint_map.yaml
index 7ebb318f..95791677 100644
--- a/network/endpoints/endpoint_map.yaml
+++ b/network/endpoints/endpoint_map.yaml
@@ -82,9 +82,9 @@ parameters:
ZaqarAdmin: {protocol: http, port: '8888', host: IP_ADDRESS}
ZaqarInternal: {protocol: http, port: '8888', host: IP_ADDRESS}
ZaqarPublic: {protocol: http, port: '8888', host: IP_ADDRESS}
- ZaqarWebSocketAdmin: {protocol: http, port: '9000', host: IP_ADDRESS}
- ZaqarWebSocketInternal: {protocol: http, port: '9000', host: IP_ADDRESS}
- ZaqarWebSocketPublic: {protocol: http, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketAdmin: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketInternal: {protocol: ws, port: '9000', host: IP_ADDRESS}
+ ZaqarWebSocketPublic: {protocol: ws, port: '9000', host: IP_ADDRESS}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
CloudEndpoints:
diff --git a/network/ports/net_ip_list_map.yaml b/network/ports/net_ip_list_map.yaml
index d7863e02..263eccd8 100644
--- a/network/ports/net_ip_list_map.yaml
+++ b/network/ports/net_ip_list_map.yaml
@@ -138,3 +138,20 @@ outputs:
SERVICE_short_node_names: {get_param: ServiceHostnameList}
for_each:
SERVICE: {get_attr: [EnabledServicesValue, value]}
+ short_service_bootstrap_hostnames:
+ description: >
+ Map of enabled services to a list of hostnames where they're running,
+ regardless of the network. Used for bootstrap purposes.
+ value:
+ yaql:
+ # If ServiceHostnameList is empty the role is deployed with zero nodes
+ # therefore we don't want to add any *_node_names to the map
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_short_bootstrap_node_name: {get_param: ServiceHostnameList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
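To make the new output concrete, here is a hedged walk-through with hypothetical inputs: given EnabledServicesValue of ['keystone', 'glance_api'] and ServiceHostnameList of ['overcloud-controller-0', 'overcloud-controller-1'], the repeat/map_merge yields one entry per service; the yaql filter only matters for roles deployed with zero nodes (empty ServiceHostnameList), whose entries are dropped.

# Illustrative result of short_service_bootstrap_hostnames (hypothetical data)
keystone_short_bootstrap_node_name:
  - overcloud-controller-0
  - overcloud-controller-1
glance_api_short_bootstrap_node_name:
  - overcloud-controller-0
  - overcloud-controller-1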
diff --git a/network/scripts/run-os-net-config.sh b/network/scripts/run-os-net-config.sh
index fc1e6d54..5df67b78 100755
--- a/network/scripts/run-os-net-config.sh
+++ b/network/scripts/run-os-net-config.sh
@@ -1,7 +1,7 @@
#!/bin/bash
-# Note this script expects the following environment variables to be set
-# normally these are provided by the calling SoftwareConfig resource, but
-# they may also be set manually for testing
+# The following environment variables may be set to substitute in a
+# custom bridge or interface name. Normally these are provided by the calling
+# SoftwareConfig resource, but they may also be set manually for testing.
# $bridge_name : The bridge device name to apply
# $interface_name : The interface name to apply
#
@@ -113,8 +113,8 @@ if [ -n '$network_config' ]; then
mkdir -p /etc/os-net-config
# Note these variables come from the calling heat SoftwareConfig
echo '$network_config' > /etc/os-net-config/config.json
- sed -i "s/bridge_name/$bridge_name/" /etc/os-net-config/config.json
- sed -i "s/interface_name/$interface_name/" /etc/os-net-config/config.json
+ sed -i "s/bridge_name/${bridge_name:-''}/" /etc/os-net-config/config.json
+ sed -i "s/interface_name/${interface_name:-''}/" /etc/os-net-config/config.json
os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
RETVAL=$?
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index a267ac66..3c1c435d 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -41,6 +41,8 @@ resource_registry:
# in the jinja loop
OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
+ OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
+
OS::TripleO::Server: OS::Nova::Server
# This creates the "heat-admin" user for all OS images by default
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index 80936497..316aeb6d 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -363,6 +363,7 @@ resources:
services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
+ ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
{% endfor %}
hostsConfig:
@@ -444,6 +445,14 @@ resources:
{% for role in roles %}
- {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
{% endfor %}
+ short_service_bootstrap_node:
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten().first()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, short_service_bootstrap_hostnames]}
+{% endfor %}
# FIXME(shardy): These require further work to move into service_ips
memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
NetVipMap: {get_attr: [VipMap, net_ip_map]}
@@ -587,9 +596,9 @@ resources:
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
- # Post deployment steps for all roles
- AllNodesDeploySteps:
- type: OS::TripleO::PostDeploySteps
+ # Upgrade steps for all roles
+ AllNodesUpgradeSteps:
+ type: OS::TripleO::UpgradeSteps
depends_on:
{% for role in roles %}
- {{role.name}}AllNodesDeployment
@@ -604,10 +613,10 @@ resources:
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
- # Upgrade steps for all roles
- AllNodesUpgradeSteps:
- type: OS::TripleO::UpgradeSteps
- depends_on: AllNodesDeploySteps
+ # Post deployment steps for all roles
+ AllNodesDeploySteps:
+ type: OS::TripleO::PostDeploySteps
+ depends_on: AllNodesUpgradeSteps
properties:
servers:
{% for role in roles %}
@@ -618,7 +627,6 @@ resources:
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
{% endfor %}
-
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
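The short_service_bootstrap_node yaql expression above merges those per-role maps and keeps only the first hostname per service. A hedged example with hypothetical per-role data:

# Input list l (one map per role; a zero-node role contributes null):
#   - {keystone_short_bootstrap_node_name: [ctrl-0, ctrl-1]}
#   - {nova_compute_short_bootstrap_node_name: [cmp-0, cmp-1]}
#   - null
# Result after where($ != null).selectMany($.items()).groupBy(...):
keystone_short_bootstrap_node_name: ctrl-0
nova_compute_short_bootstrap_node_name: cmp-0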
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 0a8bec6e..5f56fe28 100644
--- a/puppet/all-nodes-config.yaml
+++ b/puppet/all-nodes-config.yaml
@@ -28,6 +28,8 @@ parameters:
type: json
short_service_node_names:
type: json
+ short_service_bootstrap_node:
+ type: json
controller_names:
type: comma_delimited_list
memcache_node_ips:
@@ -125,6 +127,7 @@ resources:
- {get_param: service_ips}
- {get_param: service_node_names}
- {get_param: short_service_node_names}
+ - {get_param: short_service_bootstrap_node}
- controller_node_ips:
list_join:
- ','
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index e788e790..e74beb2d 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -102,10 +102,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
BlockStorage:
@@ -131,6 +140,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: BlockStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -334,9 +344,30 @@ resources:
server: {get_resource: BlockStorage}
actions: {get_param: NetworkDeploymentActions}
+ BlockStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ BlockStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: BlockStorageUpgradeInitDeployment
+ server: {get_resource: BlockStorage}
+ config: {get_resource: BlockStorageUpgradeInitConfig}
+
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: BlockStorageUpgradeInitDeployment
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 1d6cbaeb..75f58012 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -108,10 +108,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
CephStorage:
@@ -137,6 +146,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: CephStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -340,9 +350,30 @@ resources:
server: {get_resource: CephStorage}
actions: {get_param: NetworkDeploymentActions}
+ CephStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ CephStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: CephStorageUpgradeInitDeployment
+ server: {get_resource: CephStorage}
+ config: {get_resource: CephStorageUpgradeInitConfig}
+
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: CephStorageUpgradeInitDeployment
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 2eb287cd..cedab0e6 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -120,10 +120,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -151,6 +160,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: NovaComputeServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -357,6 +367,27 @@ resources:
bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
+ NovaComputeUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ NovaComputeUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: NovaComputeUpgradeInitDeployment
+ server: {get_resource: NovaCompute}
+ config: {get_resource: NovaComputeUpgradeInitConfig}
+
NovaComputeConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -402,7 +433,7 @@ resources:
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: NovaComputeUpgradeInitDeployment
properties:
name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index c286f26c..05527b63 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -134,10 +134,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
parameter_groups:
- label: deprecated
@@ -170,6 +179,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: ControllerServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -391,10 +401,30 @@ resources:
server: {get_resource: Controller}
NodeIndex: {get_param: NodeIndex}
+ ControllerUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ ControllerUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: ControllerUpgradeInitDeployment
+ server: {get_resource: Controller}
+ config: {get_resource: ControllerUpgradeInitConfig}
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
- depends_on: NetworkDeployment
+ depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
config: {get_resource: ControllerConfig}
diff --git a/puppet/extraconfig/tls/freeipa-enroll.yaml b/puppet/extraconfig/tls/freeipa-enroll.yaml
index 44be7c65..84d431fb 100644
--- a/puppet/extraconfig/tls/freeipa-enroll.yaml
+++ b/puppet/extraconfig/tls/freeipa-enroll.yaml
@@ -13,10 +13,12 @@ parameters:
type: string
FreeIPAOTP:
+ default: ''
description: 'OTP that will be used for FreeIPA enrollment'
type: string
hidden: true
FreeIPAServer:
+ default: ''
description: 'FreeIPA server DNS name'
type: string
FreeIPAIPAddress:
@@ -36,18 +38,27 @@ resources:
- name: ipa_ip
config: |
#!/bin/sh
- sed -i "/${ipa_server}/d" /etc/hosts
- # Optionally add the FreeIPA server IP to /etc/hosts
- if [ -n "${ipa_ip}" ]; then
- echo "${ipa_ip} ${ipa_server}" >> /etc/hosts
+ # If no IPA server was given as a parameter, it will be discovered
+ # via DNS.
+ if [ -n "${ipa_server}" ]; then
+ sed -i "/${ipa_server}/d" /etc/hosts
+ # Optionally add the FreeIPA server IP to /etc/hosts
+ if [ -n "${ipa_ip}" ]; then
+ echo "${ipa_ip} ${ipa_server}" >> /etc/hosts
+ fi
fi
# Set the node's domain if needed
if [ ! $(hostname -f | grep "${ipa_domain}$") ]; then
hostnamectl set-hostname "$(hostname).${ipa_domain}"
fi
yum install -y ipa-client
- # Enroll. If there is already keytab, we have already done this.
+ # Enroll. If there is already a keytab, we have already done this. If
+ # this node hasn't enrolled and the OTP is missing, fail.
if [ ! -f /etc/krb5.keytab ]; then
+ if [ -z "${otp}" ]; then
+ echo "OTP is missing"
+ exit 1
+ fi
ipa-client-install --server ${ipa_server} -w ${otp} \
--domain=${ipa_domain} -U
fi
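For reference, the enrollment parameters touched above could be supplied via parameter_defaults along these lines (all values are placeholders; with FreeIPAServer left empty the script now falls back to DNS discovery, and the OTP is still required until /etc/krb5.keytab exists):

parameter_defaults:
  FreeIPAOTP: 'SomeOneTimePassword'   # placeholder
  FreeIPAServer: 'ipa.example.com'    # may be left '' to rely on DNS discovery
  FreeIPAIPAddress: '192.168.24.50'   # optional; added to /etc/hosts when set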
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index f8dad433..8d954c09 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -15,36 +15,8 @@ parameters:
Setting to a previously unused value during stack-update will trigger
the Upgrade resources to re-run on all roles.
- UpgradeInitCommand:
- type: string
- description: |
- Command or script snippet to run on all overcloud nodes to
- initialize the upgrade process. E.g. a repository switch.
- default: ''
-
resources:
- # For the UpgradeInit also rename /etc/resolv.conf.save for +bug/1567004
- UpgradeInitConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\n"
- - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
- - get_param: UpgradeInitCommand
-
-{% for role in roles %}
- {{role.name}}Upgrade_Init:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- name: {{role.name}}Upgrade_Init
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: UpgradeInitConfig}
-{% endfor %}
-
# Upgrade Steps for all roles
# FIXME(shardy): would be nice to make the number of steps configurable
{% for step in range(1, 8) %}
@@ -56,10 +28,8 @@ resources:
# serialization, but the event output is easier to follow if we
# do, and there should be minimal performance hit (creating the
# config is cheap compared to the time to apply the deployment).
+ {% if step > 1 %}
depends_on:
- {% if step == 1 %}
- - {{role.name}}Upgrade_Init
- {% else %}
{% for dep in roles %}
- {{dep.name}}Upgrade_Step{{step -1}}
{% endfor %}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 61be37f4..77a60510 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -102,10 +102,19 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
resources:
@@ -131,6 +140,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: SwiftStorageServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -334,6 +344,27 @@ resources:
server: {get_resource: SwiftStorage}
actions: {get_param: NetworkDeploymentActions}
+ SwiftStorageUpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ SwiftStorageUpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: SwiftStorageUpgradeInitDeployment
+ server: {get_resource: SwiftStorage}
+ config: {get_resource: SwiftStorageUpgradeInitConfig}
+
SwiftStorageHieraConfig:
type: OS::Heat::StructuredConfig
properties:
@@ -373,7 +404,7 @@ resources:
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: SwiftStorageUpgradeInitDeployment
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index ac31a9da..22370a7d 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -28,6 +28,10 @@ parameters:
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPublicInterface:
+ default: nic1
+ description: What interface to bridge onto br-ex for network nodes.
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
@@ -114,6 +118,9 @@ parameters:
MonitoringSubscriptions:
type: comma_delimited_list
default: []
+ ServiceMetadataSettings:
+ type: json
+ default: {}
ConfigCommand:
type: string
description: Command which will be run whenever configuration data changes
@@ -124,6 +131,13 @@ parameters:
LoggingGroups:
type: comma_delimited_list
default: []
+ UpgradeInitCommand:
+ type: string
+ description: |
+ Command or script snippet to run on all overcloud nodes to
+ initialize the upgrade process. E.g. a repository switch.
+ default: ''
+
resources:
{{role}}:
@@ -149,6 +163,7 @@ resources:
map_merge:
- {get_param: ServerMetadata}
- {get_param: {{role}}ServerMetadata}
+ - {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
# Combine the NodeAdminUserData and NodeUserData mime archives
@@ -351,10 +366,34 @@ resources:
config: {get_resource: NetworkConfig}
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
+ input_values:
+ bridge_name: br-ex
+ interface_name: {get_param: NeutronPublicInterface}
+
+ {{role}}UpgradeInitConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config:
+ list_join:
+ - ''
+ - - "#!/bin/bash\n\n"
+ - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
+ - get_param: UpgradeInitCommand
+
+ # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
+ # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
+ {{role}}UpgradeInitDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: NetworkDeployment
+ properties:
+ name: {{role}}UpgradeInitDeployment
+ server: {get_resource: {{role}}}
+ config: {get_resource: {{role}}UpgradeInitConfig}
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
- depends_on: NetworkDeployment
+ depends_on: {{role}}UpgradeInitDeployment
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 856b306e..6e4e9c1d 100644
--- a/puppet/services/README.rst
+++ b/puppet/services/README.rst
@@ -74,3 +74,17 @@ step, "step2" for the second, etc.
6) Start control-plane services
7) Any additional online migration tasks (e.g data migrations)
+
+Nova Server Metadata Settings
+-----------------------------
+
+One can use the hook of type `OS::TripleO::ServiceServerMetadataHook` to pass
+entries to the nova instances' metadata. It is, however, disabled by default.
+In order to enable it one needs to override it in the resource registry. An
+implementation of this hook needs to conform to the following:
+
+* It needs to define an input called `RoleData` of json type. It receives
+ the contents of the `role_data` output of each role's ServiceChain.
+
+* It needs to define an output called `metadata`, which will be given to the
+ Nova Server resource as the instance's metadata.
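A minimal sketch of a hook template that satisfies this contract (the metadata entry is purely illustrative; a real hook would typically derive values from RoleData) might look like the following, and would then be mapped to OS::TripleO::ServiceServerMetadataHook in the resource registry:

heat_template_version: 2016-10-14

parameters:
  RoleData:
    type: json

outputs:
  metadata:
    description: Extra metadata entries for the role's Nova servers.
    value:
      # Hypothetical static entry, for illustration only.
      managed_by: tripleo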
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 060ae32d..24c71cbb 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -50,6 +50,14 @@ parameters:
default: false
description: Whether to store events in ceilometer.
type: boolean
+ EnableLegacyCeilometerApi:
+ default: false
+ description: Enable the legacy ceilometer API service if needed.
+ type: boolean
+ EventPipelinePublishers:
+ default: ['notifier://?topic=alarm.all']
+ description: A list of publishers to put in event_pipeline.yaml.
+ type: comma_delimited_list
Debug:
default: ''
description: Set to True to enable debugging on all services.
@@ -93,6 +101,7 @@ outputs:
- '@'
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ceilometer'
+ enable_legacy_ceilometer_api: {get_param: EnableLegacyCeilometerApi}
ceilometer_backend: {get_param: CeilometerBackend}
ceilometer::metering_secret: {get_param: CeilometerMeteringSecret}
# we include db_sync class in puppet-tripleo
@@ -104,6 +113,7 @@ outputs:
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::notification::store_events: {get_param: CeilometerStoreEvents}
+ ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 907ecddc..63d12fe2 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -8,6 +8,15 @@ parameters:
default: 8081
description: Set opendaylight service port
type: number
+ OpenDaylightUsername:
+ default: 'admin'
+ description: The username for the opendaylight server.
+ type: string
+ OpenDaylightPassword:
+ default: 'admin'
+ type: string
+ description: The password for the opendaylight server.
+ hidden: true
OpenDaylightConnectionProtocol:
description: L7 protocol used for REST access
type: string
@@ -46,6 +55,8 @@ outputs:
service_name: opendaylight_ovs
config_settings:
opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
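For example, the default credentials could be overridden via parameter_defaults (the values below are placeholders):

parameter_defaults:
  OpenDaylightUsername: 'odl-admin'     # placeholder
  OpenDaylightPassword: 'ChangeMe123'   # placeholder; parameter is hidden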
diff --git a/puppet/services/panko-base.yaml b/puppet/services/panko-base.yaml
index 32754a55..af9c5353 100644
--- a/puppet/services/panko-base.yaml
+++ b/puppet/services/panko-base.yaml
@@ -37,7 +37,6 @@ outputs:
value:
service_name: panko_base
config_settings:
- panko_redis_password: {get_param: RedisPassword}
panko::db::database_connection:
list_join:
- ''
diff --git a/puppet/services/services.yaml b/puppet/services/services.yaml
index 13df5bbe..97f87061 100644
--- a/puppet/services/services.yaml
+++ b/puppet/services/services.yaml
@@ -42,6 +42,11 @@ resources:
LoggingConfiguration:
type: OS::TripleO::LoggingConfiguration
+ ServiceServerMetadataHook:
+ type: OS::TripleO::ServiceServerMetadataHook
+ properties:
+ RoleData: {get_attr: [ServiceChain, role_data]}
+
outputs:
role_data:
description: Combined Role data for this set of services.
@@ -113,3 +118,4 @@ outputs:
# Note we use distinct() here to filter any identical tasks, e.g yum update for all services
expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
+ service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index ba184ab0..94db9e41 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
description: >
OpenStack Swift Proxy service configured with Puppet
@@ -49,6 +49,18 @@ parameters:
default: guest
description: The username for RabbitMQ
type: string
+ SwiftCeilometerPipelineEnabled:
+ description: Set to False to disable the swift proxy ceilometer pipeline.
+ default: True
+ type: boolean
+ RabbitClientPort:
+ default: 5672
+ description: Set the rabbit subscriber port; change this if using SSL.
+ type: number
+
+conditions:
+
+ ceilometer_pipeline_enabled: {equals: [{get_param: SwiftCeilometerPipelineEnabled}, True]}
resources:
SwiftBase:
@@ -78,6 +90,8 @@ outputs:
swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
swift::proxy::ceilometer::nonblocking_notify: true
+ tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
+ tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
tripleo.swift_proxy.firewall_rules:
'122 swift proxy':
dport:
@@ -89,26 +103,34 @@ outputs:
- ResellerAdmin
swift::proxy::versioned_writes::allow_versioned_writes: true
swift::proxy::pipeline:
- - 'catch_errors'
- - 'healthcheck'
- - 'proxy-logging'
- - 'cache'
- - 'ratelimit'
- - 'bulk'
- - 'tempurl'
- - 'formpost'
- - 'authtoken'
- - 'keystone'
- - 'staticweb'
- - 'copy'
- - 'container-quotas'
- - 'account-quotas'
- - 'slo'
- - 'dlo'
- - 'versioned_writes'
- - 'ceilometer'
- - 'proxy-logging'
- - 'proxy-server'
+ yaql:
+ expression: $.data.pipeline.where($ != '')
+ data:
+ pipeline:
+ - 'catch_errors'
+ - 'healthcheck'
+ - 'proxy-logging'
+ - 'cache'
+ - 'ratelimit'
+ - 'bulk'
+ - 'tempurl'
+ - 'formpost'
+ - 'authtoken'
+ - 'keystone'
+ - 'staticweb'
+ - 'copy'
+ - 'container-quotas'
+ - 'account-quotas'
+ - 'slo'
+ - 'dlo'
+ - 'versioned_writes'
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ - 'ceilometer'
+ - ''
+ - 'proxy-logging'
+ - 'proxy-server'
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
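The pipeline rewrite above relies on the conditions section and the if function, which is why heat_template_version is bumped to 2016-10-14 (the first version that provides them): the if yields an empty string when the ceilometer middleware is disabled, and the yaql where($ != '') drops it. The same pattern in isolation, as a hedged standalone sketch (all names are hypothetical):

heat_template_version: 2016-10-14

parameters:
  FeatureEnabled:
    type: boolean
    default: true

conditions:
  feature_enabled: {equals: [{get_param: FeatureEnabled}, true]}

outputs:
  filtered_list:
    value:
      yaql:
        # Drop the '' placeholder that if() yields when the feature is off.
        expression: $.data.pipeline.where($ != '')
        data:
          pipeline:
            - 'first'
            - if: [feature_enabled, 'middle', '']
            - 'last'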