-rw-r--r--  all-nodes-validation.yaml | 6
-rw-r--r--  bindep.txt | 2
-rw-r--r--  ci/environments/multinode-3nodes.yaml | 2
-rw-r--r--  ci/environments/multinode.yaml | 1
-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 1
-rw-r--r--  ci/environments/scenario002-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario003-multinode.yaml | 1
-rw-r--r--  ci/environments/scenario004-multinode.yaml | 1
-rw-r--r--  deployed-server/README.rst | 4
-rw-r--r--  deployed-server/deployed-server-bootstrap-centos.sh | 6
-rw-r--r--  deployed-server/deployed-server-bootstrap-rhel.sh | 6
-rwxr-xr-x  deployed-server/scripts/get-occ-config.sh | 2
-rw-r--r--  environments/cinder-netapp-config.yaml | 2
-rw-r--r--  environments/collectd-environment.yaml | 32
-rw-r--r--  environments/contrail/contrail-net.yaml | 4
-rw-r--r--  environments/deployed-server-environment.j2.yaml | 11
-rw-r--r--  environments/deployed-server-environment.yaml | 4
-rw-r--r--  environments/deployed-server-pacemaker-environment.yaml | 4
-rw-r--r--  environments/external-loadbalancer-vip-v6.yaml | 2
-rw-r--r--  environments/external-loadbalancer-vip.yaml | 2
-rw-r--r--  environments/logging-environment.yaml | 2
-rw-r--r--  environments/major-upgrade-aodh-migration.yaml | 6
-rw-r--r--  environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml | 7
-rw-r--r--  environments/major-upgrade-composable-steps.yaml | 1
-rw-r--r--  environments/major-upgrade-converge.yaml | 1
-rw-r--r--  environments/major-upgrade-pacemaker-converge.yaml | 6
-rw-r--r--  environments/major-upgrade-pacemaker-init.yaml | 6
-rw-r--r--  environments/major-upgrade-pacemaker.yaml | 6
-rw-r--r--  environments/major-upgrade-remove-sahara.yaml | 6
-rw-r--r--  environments/network-environment.yaml | 4
-rw-r--r--  environments/neutron-ml2-bigswitch.yaml | 13
-rw-r--r--  environments/neutron-ml2-cisco-n1kv.yaml | 4
-rw-r--r--  environments/neutron-opendaylight.yaml | 1
-rw-r--r--  environments/services/disable-ceilometer-api.yaml | 3
-rw-r--r--  environments/services/keystone_domain_specific_ldap_backend.yaml | 18
-rw-r--r--  environments/sshd-banner.yaml | 6
-rw-r--r--  environments/updates/update-from-192_0_2-subnet.yaml | 3
-rw-r--r--  extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration | 50
-rw-r--r--  extraconfig/tasks/aodh_data_migration.sh | 19
-rw-r--r--  extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml | 62
-rwxr-xr-x  extraconfig/tasks/major_upgrade_check.sh | 109
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh | 36
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh | 176
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh | 68
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh | 17
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh | 8
-rwxr-xr-x  extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh | 15
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker.yaml | 175
-rw-r--r--  extraconfig/tasks/major_upgrade_pacemaker_migrations.sh | 200
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml | 25
-rw-r--r--  extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp | 103
-rwxr-xr-x  extraconfig/tasks/pacemaker_common_functions.sh | 9
-rwxr-xr-x  extraconfig/tasks/run_puppet.sh | 5
-rw-r--r--  extraconfig/tasks/ssh/host_public_key.yaml | 42
-rw-r--r--  extraconfig/tasks/ssh/known_hosts_config.yaml | 36
-rw-r--r--  extraconfig/tasks/swift-ring-deploy.yaml | 31
-rw-r--r--  extraconfig/tasks/swift-ring-update.yaml | 42
-rw-r--r--  extraconfig/tasks/tripleo_upgrade_node.sh | 3
-rwxr-xr-x  extraconfig/tasks/yum_update.sh | 59
-rw-r--r--  net-config-linux-bridge.yaml | 2
-rw-r--r--  overcloud-resource-registry-puppet.j2.yaml | 17
-rw-r--r--  overcloud.j2.yaml | 41
-rw-r--r--  puppet/blockstorage-role.yaml | 37
-rw-r--r--  puppet/cephstorage-role.yaml | 37
-rw-r--r--  puppet/compute-role.yaml | 39
-rw-r--r--  puppet/controller-role.yaml | 38
-rw-r--r--  puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml | 9
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml | 157
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml | 12
-rw-r--r--  puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml | 4
-rw-r--r--  puppet/major_upgrade_steps.j2.yaml | 57
-rw-r--r--  puppet/objectstorage-role.yaml | 37
-rw-r--r--  puppet/puppet-steps.j2 | 40
-rw-r--r--  puppet/role.role.j2.yaml | 37
-rw-r--r--  puppet/services/ceilometer-base.yaml | 9
-rw-r--r--  puppet/services/ceph-rgw.yaml | 2
-rw-r--r--  puppet/services/cinder-backend-netapp.yaml | 129
-rw-r--r--  puppet/services/cinder-backend-scaleio.yaml | 2
-rw-r--r--  puppet/services/cinder-volume.yaml | 6
-rw-r--r--  puppet/services/congress.yaml | 1
-rw-r--r--  puppet/services/database/mongodb.yaml | 5
-rw-r--r--  puppet/services/database/mysql.yaml | 6
-rw-r--r--  puppet/services/ec2-api.yaml | 5
-rw-r--r--  puppet/services/etcd.yaml | 2
-rw-r--r--  puppet/services/ironic-conductor.yaml | 43
-rw-r--r--  puppet/services/keystone.yaml | 40
-rw-r--r--  puppet/services/metrics/collectd.yaml | 4
-rw-r--r--  puppet/services/monitoring/sensu-client.yaml | 2
-rw-r--r--  puppet/services/network/contrail-vrouter.yaml | 2
-rw-r--r--  puppet/services/neutron-base.yaml | 45
-rw-r--r--  puppet/services/neutron-bigswitch-agent.yaml | 31
-rw-r--r--  puppet/services/neutron-ovs-agent.yaml | 35
-rw-r--r--  puppet/services/neutron-ovs-dpdk-agent.yaml | 10
-rw-r--r--  puppet/services/neutron-plugin-ml2-odl.yaml | 45
-rw-r--r--  puppet/services/nova-api.yaml | 12
-rw-r--r--  puppet/services/nova-base.yaml | 16
-rw-r--r--  puppet/services/nova-compute.yaml | 16
-rw-r--r--  puppet/services/nova-ironic.yaml | 4
-rw-r--r--  puppet/services/nova-libvirt.yaml | 1
-rw-r--r--  puppet/services/octavia-base.yaml | 6
-rw-r--r--  puppet/services/opendaylight-api.yaml | 5
-rw-r--r--  puppet/services/opendaylight-ovs.yaml | 42
-rw-r--r--  puppet/services/openvswitch-upgrade.yaml | 50
-rw-r--r--  puppet/services/pacemaker.yaml | 18
-rw-r--r--  puppet/services/sshd.yaml | 31
-rw-r--r--  puppet/services/swift-ringbuilder.yaml | 10
-rw-r--r--  puppet/services/tacker.yaml | 1
-rw-r--r--  puppet/services/tripleo-firewall.yaml | 6
-rw-r--r--  releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml | 5
-rw-r--r--  releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml | 8
-rw-r--r--  releasenotes/notes/big-switch-agent-4c743a2112251234.yaml | 5
-rw-r--r--  releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml | 6
-rw-r--r--  releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml | 6
-rw-r--r--  releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml | 6
-rw-r--r--  releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml | 5
-rw-r--r--  releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml | 4
-rw-r--r--  releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml | 6
-rw-r--r--  releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml | 4
-rw-r--r--  releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml | 14
-rw-r--r--  releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml | 12
-rw-r--r--  releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml | 20
-rw-r--r--  releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml | 3
-rw-r--r--  releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml | 5
-rw-r--r--  releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml | 4
-rw-r--r--  releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml | 4
-rw-r--r--  releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml | 5
-rw-r--r--  releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml | 7
-rw-r--r--  requirements.txt | 2
-rw-r--r--  roles_data.yaml | 4
-rw-r--r--  validation-scripts/all-nodes.sh | 18
130 files changed, 1314 insertions, 1522 deletions
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index 65d01d0f..eea3e40a 100644
--- a/all-nodes-validation.yaml
+++ b/all-nodes-validation.yaml
@@ -10,6 +10,10 @@ parameters:
default: ''
description: A string containing a space separated list of IP addresses used to ping test each available network interface.
type: string
+ ValidateFqdn:
+ default: false
+ description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
+ type: boolean
resources:
AllNodesValidationsImpl:
@@ -19,6 +23,8 @@ resources:
inputs:
- name: ping_test_ips
default: {get_param: PingTestIps}
+ - name: validate_fqdn
+ default: {get_param: ValidateFqdn}
config: {get_file: ./validation-scripts/all-nodes.sh}
outputs:
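
Note: the new ValidateFqdn check is off by default; a minimal environment file that enables it would need nothing more than the following (a sketch, using only the parameter added above):

  parameter_defaults:
    ValidateFqdn: true
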
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 00000000..4f9b4254
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index 03065c6a..ec9af4a3 100644
--- a/ci/environments/multinode-3nodes.yaml
+++ b/ci/environments/multinode-3nodes.yaml
@@ -55,6 +55,7 @@
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
- name: Controller
CountDefault: 1
@@ -76,3 +77,4 @@
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index c946ec8a..daa2d6f0 100644
--- a/ci/environments/multinode.yaml
+++ b/ci/environments/multinode.yaml
@@ -51,6 +51,7 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 2251cc0c..a0f9b093 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -55,6 +55,7 @@ parameter_defaults:
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index cbcfa9b3..f53ec1f6 100644
--- a/ci/environments/scenario002-multinode.yaml
+++ b/ci/environments/scenario002-multinode.yaml
@@ -60,6 +60,7 @@ parameter_defaults:
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 6e926f74..035f7492 100644
--- a/ci/environments/scenario003-multinode.yaml
+++ b/ci/environments/scenario003-multinode.yaml
@@ -54,6 +54,7 @@ parameter_defaults:
- OS::TripleO::Services::MistralExecutor
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 67515284..a914c97c 100644
--- a/ci/environments/scenario004-multinode.yaml
+++ b/ci/environments/scenario004-multinode.yaml
@@ -65,6 +65,7 @@ parameter_defaults:
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
index e4d8299b..8638818b 100644
--- a/deployed-server/README.rst
+++ b/deployed-server/README.rst
@@ -67,11 +67,11 @@ example:
parameter_defaults:
ControlPlaneDefaultRoute: 192.168.122.130
ControlPlaneSubnetCidr: "24"
- EC2MetadataIp: "192.0.2.1"
+ EC2MetadataIp: "192.168.24.1"
In this example, 192.168.122.130 is the external management IP of an
undercloud, thus it is the default route for the configured local_ip value of
-192.0.2.1.
+192.168.24.1.
os-collect-config
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
index 7266ca57..6f2bb124 100644
--- a/deployed-server/deployed-server-bootstrap-centos.sh
+++ b/deployed-server/deployed-server-bootstrap-centos.sh
@@ -8,9 +8,13 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
index 36ff0077..9e9e9b3b 100644
--- a/deployed-server/deployed-server-bootstrap-rhel.sh
+++ b/deployed-server/deployed-server-bootstrap-rhel.sh
@@ -8,6 +8,10 @@ yum install -y \
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
diff --git a/deployed-server/scripts/get-occ-config.sh b/deployed-server/scripts/get-occ-config.sh
index 6c196f97..d0cc4dff 100755
--- a/deployed-server/scripts/get-occ-config.sh
+++ b/deployed-server/scripts/get-occ-config.sh
@@ -63,7 +63,7 @@ for role in $OVERCLOUD_ROLES; do
rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
done
- stacks=$(openstack stack resource list $rg_stack -c physical_resource_id -f value)
+ stacks=$(openstack stack resource list $rg_stack -c resource_name -c physical_resource_id -f json | jq -r "sort_by(.resource_name) | .[] | .physical_resource_id")
i=0
diff --git a/environments/cinder-netapp-config.yaml b/environments/cinder-netapp-config.yaml
index b9a84342..dfd15893 100644
--- a/environments/cinder-netapp-config.yaml
+++ b/environments/cinder-netapp-config.yaml
@@ -1,7 +1,7 @@
# A Heat environment file which can be used to enable a
# a Cinder NetApp backend, configured via puppet
resource_registry:
- OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+ OS::TripleO::Services::CinderBackendNetApp: ../puppet/services/cinder-backend-netapp.yaml
parameter_defaults:
CinderEnableNetappBackend: true
diff --git a/environments/collectd-environment.yaml b/environments/collectd-environment.yaml
index 7780530c..6071dccc 100644
--- a/environments/collectd-environment.yaml
+++ b/environments/collectd-environment.yaml
@@ -3,8 +3,36 @@ resource_registry:
# parameter_defaults:
#
-## You can specify additional plugins to load using the
-## CollectdExtraPlugins key:
+## Collectd server configuration
+# CollectdServer: collectd0.example.com
+#
+################
+#### Other config parameters, the values shown here are the defaults
+################
+#
+# CollectdServerPort: 25826
+# CollectdSecurityLevel: None
+#
+################
+#### If CollectdSecurityLevel is set to Encrypt or Sign
+#### the following parameters are also needed
+###############
+#
+# CollectdUsername: user
+# CollectdPassword: password
+#
+## CollectdDefaultPlugins: the default plugins used by collectd
+#
+# CollectdDefaultPlugins:
+# - disk
+# - interface
+# - load
+# - memory
+# - processes
+# - tcpconns
+#
+## Extra plugins can be enabled by the CollectdExtraPlugins parameter:
+## All the available plugins are:
#
# CollectdExtraPlugins:
# - disk
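
Note: read together, the commented defaults above translate into an environment file along these lines (a sketch; collectd0.example.com is a placeholder server name and Sign is just one of the documented security levels):

  parameter_defaults:
    CollectdServer: collectd0.example.com
    CollectdServerPort: 25826
    CollectdSecurityLevel: Sign
    CollectdUsername: user
    CollectdPassword: password
    CollectdExtraPlugins:
      - disk
      - processes
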
diff --git a/environments/contrail/contrail-net.yaml b/environments/contrail/contrail-net.yaml
index 1e64f91d..cca9beac 100644
--- a/environments/contrail/contrail-net.yaml
+++ b/environments/contrail/contrail-net.yaml
@@ -8,7 +8,7 @@ resource_registry:
parameter_defaults:
ControlPlaneSubnetCidr: '24'
- ControlPlaneDefaultRoute: 192.0.2.254
+ ControlPlaneDefaultRoute: 192.168.24.254
InternalApiNetCidr: 10.0.0.0/24
InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}]
InternalApiDefaultRoute: 10.0.0.1
@@ -17,7 +17,7 @@ parameter_defaults:
ManagementInterfaceDefaultRoute: 10.1.0.1
ExternalNetCidr: 10.2.0.0/24
ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}]
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
DnsServers: ["8.8.8.8","8.8.4.4"]
VrouterPhysicalInterface: eth1
VrouterGateway: 10.0.0.1
diff --git a/environments/deployed-server-environment.j2.yaml b/environments/deployed-server-environment.j2.yaml
new file mode 100644
index 00000000..327934da
--- /dev/null
+++ b/environments/deployed-server-environment.j2.yaml
@@ -0,0 +1,11 @@
+resource_registry:
+ OS::TripleO::Server: ../deployed-server/deployed-server.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
+
+{% for role in roles %}
+ # Default nic config mappings
+ OS::TripleO::{{role.name}}::Net::SoftwareConfig: ../net-config-static.yaml
+{% endfor %}
+
+ OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig: ../net-config-static-bridge.yaml
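
Note: for the default Controller and Compute roles, the Jinja loop above renders to roughly the following registry (a sketch assuming only those two roles are listed in roles_data.yaml):

  resource_registry:
    OS::TripleO::Server: ../deployed-server/deployed-server.yaml
    OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
    OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
    # Default nic config mappings
    OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-static.yaml
    OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-static.yaml
    OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig: ../net-config-static-bridge.yaml
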
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
deleted file mode 100644
index 7bc1bd9b..00000000
--- a/environments/deployed-server-environment.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-resource_registry:
- OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
- OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
diff --git a/environments/deployed-server-pacemaker-environment.yaml b/environments/deployed-server-pacemaker-environment.yaml
new file mode 100644
index 00000000..85fa7d2f
--- /dev/null
+++ b/environments/deployed-server-pacemaker-environment.yaml
@@ -0,0 +1,4 @@
+resource_registry:
+ OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
diff --git a/environments/external-loadbalancer-vip-v6.yaml b/environments/external-loadbalancer-vip-v6.yaml
index fbd1fb98..bd455175 100644
--- a/environments/external-loadbalancer-vip-v6.yaml
+++ b/environments/external-loadbalancer-vip-v6.yaml
@@ -13,7 +13,7 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
diff --git a/environments/external-loadbalancer-vip.yaml b/environments/external-loadbalancer-vip.yaml
index 1759c04c..dec9b835 100644
--- a/environments/external-loadbalancer-vip.yaml
+++ b/environments/external-loadbalancer-vip.yaml
@@ -12,7 +12,7 @@ parameter_defaults:
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
diff --git a/environments/logging-environment.yaml b/environments/logging-environment.yaml
index c583ca79..ae8bd7b9 100644
--- a/environments/logging-environment.yaml
+++ b/environments/logging-environment.yaml
@@ -18,7 +18,7 @@ resource_registry:
## (note the use of port 24284 for ssl connections)
#
# LoggingServers:
-# - host: 192.0.2.11
+# - host: 192.168.24.11
# port: 24284
# LoggingUsesSSL: true
# LoggingSharedKey: secret
diff --git a/environments/major-upgrade-aodh-migration.yaml b/environments/major-upgrade-aodh-migration.yaml
deleted file mode 100644
index 9d6ce73e..00000000
--- a/environments/major-upgrade-aodh-migration.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-resource_registry:
- # aodh data migration
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml b/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml
deleted file mode 100644
index 6798c255..00000000
--- a/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-resource_registry:
-
- # This initiates the upgrades for ceilometer api to run under apache wsgi
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 9ecc2251..3bc9faa2 100644
--- a/environments/major-upgrade-composable-steps.yaml
+++ b/environments/major-upgrade-composable-steps.yaml
@@ -1,6 +1,7 @@
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
+ EnableConfigPurge: true
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
diff --git a/environments/major-upgrade-converge.yaml b/environments/major-upgrade-converge.yaml
index f09fb20e..4e8bf46b 100644
--- a/environments/major-upgrade-converge.yaml
+++ b/environments/major-upgrade-converge.yaml
@@ -3,5 +3,6 @@
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/post.yaml
parameter_defaults:
+ EnableConfigPurge: false
UpgradeLevelNovaCompute: ''
UpgradeInitCommonCommand: ''
diff --git a/environments/major-upgrade-pacemaker-converge.yaml b/environments/major-upgrade-pacemaker-converge.yaml
deleted file mode 100644
index e9a5f9be..00000000
--- a/environments/major-upgrade-pacemaker-converge.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
- UpgradeLevelNovaCompute: ''
-
-resource_registry:
- OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml
deleted file mode 100644
index f4f361df..00000000
--- a/environments/major-upgrade-pacemaker-init.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml
deleted file mode 100644
index 9fb51a4d..00000000
--- a/environments/major-upgrade-pacemaker.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-remove-sahara.yaml b/environments/major-upgrade-remove-sahara.yaml
deleted file mode 100644
index e0aaf130..00000000
--- a/environments/major-upgrade-remove-sahara.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
- KeepSaharaServicesOnUpgrade: false
-resource_registry:
- OS::TripleO::Services::SaharaApi: OS::Heat::None
- OS::TripleO::Services::SaharaEngine: OS::Heat::None
-
diff --git a/environments/network-environment.yaml b/environments/network-environment.yaml
index 210b6b03..3de5dba5 100644
--- a/environments/network-environment.yaml
+++ b/environments/network-environment.yaml
@@ -18,8 +18,8 @@ parameter_defaults:
# CIDR subnet mask length for provisioning network
ControlPlaneSubnetCidr: '24'
# Gateway router for the provisioning network (or Undercloud IP)
- ControlPlaneDefaultRoute: 192.0.2.254
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ ControlPlaneDefaultRoute: 192.168.24.254
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
# Customize the IP subnets to match the local environment
InternalApiNetCidr: 172.17.0.0/24
StorageNetCidr: 172.18.0.0/24
diff --git a/environments/neutron-ml2-bigswitch.yaml b/environments/neutron-ml2-bigswitch.yaml
index 750d3c4e..8a4a144c 100644
--- a/environments/neutron-ml2-bigswitch.yaml
+++ b/environments/neutron-ml2-bigswitch.yaml
@@ -3,12 +3,17 @@
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+ OS::TripleO::NeutronBigswitchAgent: ../puppet/services/neutron-bigswitch-agent.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
# Required to fill in:
NeutronBigswitchRestproxyServers:
NeutronBigswitchRestproxyServerAuth:
- NeutronMechanismDrivers: bsn_ml2
+ NeutronMechanismDrivers: openvswitch,bsn_ml2
+ NeutronServicePlugins: bsn_l3,bsn_service_plugin
+ KeystoneNotificationDriver: messaging
# Optional:
# NeutronBigswitchRestproxyAutoSyncOnFailure:
@@ -19,3 +24,9 @@ parameter_defaults:
# NeutronBigswitchAgentEnabled:
# NeutronBigswitchLLDPEnabled:
+ ControllerExtraConfig:
+ neutron::agents::l3::enabled: false
+ neutron::agents::dhcp::enable_force_metadata: true
+ neutron::agents::dhcp::enable_isolated_metadata: true
+ neutron::agents::dhcp::enable_metadata_network: false
+ neutron::server::l3_ha: false
diff --git a/environments/neutron-ml2-cisco-n1kv.yaml b/environments/neutron-ml2-cisco-n1kv.yaml
index 651e9564..8d46e1ca 100644
--- a/environments/neutron-ml2-cisco-n1kv.yaml
+++ b/environments/neutron-ml2-cisco-n1kv.yaml
@@ -5,7 +5,7 @@ resource_registry:
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
parameter_defaults:
- N1000vVSMIP: '192.0.2.50'
- N1000vMgmtGatewayIP: '192.0.2.1'
+ N1000vVSMIP: '192.168.24.50'
+ N1000vMgmtGatewayIP: '192.168.24.1'
N1000vVSMDomainID: '100'
N1000vVSMHostMgmtIntf: 'br-ex'
diff --git a/environments/neutron-opendaylight.yaml b/environments/neutron-opendaylight.yaml
index e08b2b27..4f0de91c 100644
--- a/environments/neutron-opendaylight.yaml
+++ b/environments/neutron-opendaylight.yaml
@@ -3,6 +3,7 @@ resource_registry:
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-odl.yaml
OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml
index 94cd8d5d..fb1ea6a7 100644
--- a/environments/services/disable-ceilometer-api.yaml
+++ b/environments/services/disable-ceilometer-api.yaml
@@ -1,2 +1,5 @@
resource_registry:
OS::TripleO::Services::CeilometerApi: OS::Heat::None
+
+parameter_defaults:
+ CeilometerApiEndpoint: false
diff --git a/environments/services/keystone_domain_specific_ldap_backend.yaml b/environments/services/keystone_domain_specific_ldap_backend.yaml
new file mode 100644
index 00000000..3cc9c7b7
--- /dev/null
+++ b/environments/services/keystone_domain_specific_ldap_backend.yaml
@@ -0,0 +1,18 @@
+# This is an example template of how to configure keystone domain-specific LDAP
+# backends. It will configure a domain called tripleoldap with the attributes
+# specified.
+parameter_defaults:
+ KeystoneLDAPDomainEnable: true
+ KeystoneLDAPBackendConfigs:
+ tripleoldap:
+ url: ldap://192.168.24.251
+ user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
+ password: Secrete
+ suffix: dc=tripleo,dc=example,dc=com
+ user_tree_dn: ou=Users,dc=tripleo,dc=example,dc=com
+ user_filter: "(memberOf=cn=OSuser,ou=Groups,dc=tripleo,dc=example,dc=com)"
+ user_objectclass: person
+ user_id_attribute: cn
+ user_allow_create: false
+ user_allow_update: false
+ user_allow_delete: false
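
Note: KeystoneLDAPBackendConfigs is keyed by domain name, so more than one domain can be declared in the same map (a sketch; example2 and its URL are hypothetical):

  parameter_defaults:
    KeystoneLDAPDomainEnable: true
    KeystoneLDAPBackendConfigs:
      tripleoldap:
        url: ldap://192.168.24.251
        # ...settings as in the file above...
      example2:
        # hypothetical second domain
        url: ldap://ldap2.example.com
        suffix: dc=example2,dc=example,dc=com
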
diff --git a/environments/sshd-banner.yaml b/environments/sshd-banner.yaml
index 041c0990..894bf1c9 100644
--- a/environments/sshd-banner.yaml
+++ b/environments/sshd-banner.yaml
@@ -1,6 +1,3 @@
-resource_registry:
- OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
-
parameter_defaults:
BannerText: |
******************************************************************
@@ -11,3 +8,6 @@ parameter_defaults:
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
+ MessageOfTheDay: |
+ ALERT! You are entering into a secured area!
+ This service is restricted to authorized users only.
diff --git a/environments/updates/update-from-192_0_2-subnet.yaml b/environments/updates/update-from-192_0_2-subnet.yaml
new file mode 100644
index 00000000..1813e7be
--- /dev/null
+++ b/environments/updates/update-from-192_0_2-subnet.yaml
@@ -0,0 +1,3 @@
+parameter_defaults:
+ ControlPlaneDefaultRoute: 192.0.2.1
+ EC2MetadataIp: 192.0.2.1
diff --git a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
index 4c9e08ef..0d0fa3f1 100644
--- a/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
+++ b/extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
@@ -11,7 +11,7 @@ if [ -e $OK ] ; then
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
config_opts=
attach_opts=
@@ -157,27 +157,41 @@ else
fi
function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
- else
- retryCount=0
+ # Inhibit -e since we want to retry without exiting..
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
@@ -213,14 +227,14 @@ case "${REG_METHOD:-}" in
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
fi
- retry subscription-manager repos --disable '*'
+ retry subscription-manager repos --disable='*'
retry subscription-manager $repos
;;
satellite)
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
@@ -229,7 +243,7 @@ case "${REG_METHOD:-}" in
retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644
index d4c29673..00000000
--- a/extraconfig/tasks/aodh_data_migration.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644
index cf5d7a84..00000000
--- a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755
index 8bdff5e7..00000000
--- a/extraconfig/tasks/major_upgrade_check.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755
index 080831ab..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755
index 6bfe1239..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755
index a3cbd945..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755
index d2cb9553..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755
index fa95f1f8..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755
index d569084d..00000000
--- a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644
index 74d3be71..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
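The removed template above builds each step's script by concatenating str_replace'd bash snippets with the sourced helper files (the TODO in the resources section notes that map_merge with input_values could replace this pattern). As a rough sketch, the Step1 script delivered to every controller begins along these lines, using the parameter defaults from the removed template:

    #!/bin/bash
    # Values filled in by str_replace (template defaults shown for illustration)
    upgrade_level_nova_compute=''
    mariadb_do_major_upgrade='auto'
    # ...followed, in order, by the full contents of:
    #   pacemaker_common_functions.sh
    #   major_upgrade_check.sh
    #   major_upgrade_pacemaker_migrations.sh
    #   major_upgrade_controller_pacemaker_1.sh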
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644
index ae22a1e7..00000000
--- a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump/restore path if we change either the epoch or
- # the version of the package. If only the release tag changes we do not do it.
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources are the ones we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
- # 4. Remove all the resources that won't be managed by pacemaker in newton.
- # The outcome will be that they are stopped and removed from pacemaker's control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources to be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is a no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from newton to ocata
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # The number of controllers is obtained by counting how many hostnames we
- # have in the controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
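The removed rabbitmq_newton_ocata_upgrade function above computes the new "ha-params" value as CEIL(N/2) with plain integer arithmetic. A minimal standalone sketch of that calculation, with controller counts hard-coded for illustration instead of being read from the controller_node_names hiera key:

    #!/bin/bash
    # Ceiling of N/2 using bash integer arithmetic, as in the removed function
    for nr_controllers in 1 2 3 4 5; do
        nr_queues=$(( nr_controllers / 2 + (nr_controllers % 2) ))
        echo "controllers=${nr_controllers} -> ha-params=${nr_queues}"
    done
    # Prints 1, 1, 2, 2, 3 for 1..5 controllers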
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644
index 45933fb7..00000000
--- a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644
index a8d43663..00000000
--- a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
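The deleted manifest assembles the ceilometer MongoDB connection string by suffixing every node IP with the port and bracketing IPv6 addresses. A rough bash equivalent of that string assembly, with made-up node IPs and replica-set name standing in for the hiera values:

    #!/bin/bash
    # Hypothetical stand-ins for the mongodb_node_ips and replset hiera keys
    mongo_node_ips=("172.16.2.10" "172.16.2.11" "172.16.2.12")
    replset="tripleo"
    ipv6=false

    parts=()
    for ip in "${mongo_node_ips[@]}"; do
        if [ "$ipv6" = "true" ]; then
            parts+=("[${ip}]:27017")   # IPv6 addresses get bracketed
        else
            parts+=("${ip}:27017")
        fi
    done
    # Join the host:port pairs with commas
    mongo_node_string=$(IFS=,; echo "${parts[*]}")
    echo "mongodb://${mongo_node_string}/ceilometer?replicaSet=${replset}"
    # -> mongodb://172.16.2.10:27017,172.16.2.11:27017,172.16.2.12:27017/ceilometer?replicaSet=tripleo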
diff --git a/extraconfig/tasks/pacemaker_common_functions.sh b/extraconfig/tasks/pacemaker_common_functions.sh
index aae4a2de..4480f74d 100755
--- a/extraconfig/tasks/pacemaker_common_functions.sh
+++ b/extraconfig/tasks/pacemaker_common_functions.sh
@@ -299,9 +299,10 @@ function systemctl_swift {
}
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# Update condition and add --notriggerun for +bug/1669714
function special_case_ovs_upgrade_if_needed {
- if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
+ if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then
+ echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected"
rm -rf OVS_UPGRADE
mkdir OVS_UPGRADE && pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
@@ -310,8 +311,8 @@ function special_case_ovs_upgrade_if_needed {
if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
echo "Looks like newer version of $pkg is already installed, skipping"
else
- echo "Updating $pkg with nopostun option"
- rpm -U --replacepkgs --nopostun $pkg
+ echo "Updating $pkg with --nopostun --notriggerun"
+ rpm -U --replacepkgs --nopostun --notriggerun $pkg
fi
done
popd
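With the updated condition, the workaround now also triggers when the exact openvswitch-2.5.0-14 build is installed, not only when the postun scriptlet would try-restart the service. A quick manual check of whether a given node would take this path, reusing the same commands as the function above:

    #!/bin/bash
    # Manual check mirroring the updated condition in special_case_ovs_upgrade_if_needed
    if rpm -qa | grep -q "^openvswitch-2.5.0-14" || \
       rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"; then
        echo "This node would use the --nopostun --notriggerun OVS upgrade path"
    else
        echo "Normal yum update path for openvswitch"
    fi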
diff --git a/extraconfig/tasks/run_puppet.sh b/extraconfig/tasks/run_puppet.sh
index b7771e33..e3f6c493 100755
--- a/extraconfig/tasks/run_puppet.sh
+++ b/extraconfig/tasks/run_puppet.sh
@@ -10,7 +10,10 @@ function run_puppet {
export FACTER_deploy_config_name="${role}Deployment_Step${step}"
if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
set +e
- puppet apply --detailed-exitcodes "${manifest}"
+ puppet apply --detailed-exitcodes \
+ --modulepath \
+ /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ "${manifest}"
rc=$?
echo "puppet apply exited with exit code $rc"
else
diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml
new file mode 100644
index 00000000..847c8772
--- /dev/null
+++ b/extraconfig/tasks/ssh/host_public_key.yaml
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+description: >
+ This is a template which will fetch the ssh host public key.
+
+parameters:
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ SshHostPubKeyConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ outputs:
+ - name: rsa
+ - name: ecdsa
+ - name: ed25519
+ config: |
+ #!/bin/sh -x
+ test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+ test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+ test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+ SshHostPubKeyDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: SshHostPubKeyConfig}
+ server: {get_param: server}
+
+
+outputs:
+ ecdsa:
+ description: Host ssh public key (ecdsa)
+ value: {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+ rsa:
+ description: Host ssh public key (rsa)
+ value: {get_attr: [SshHostPubKeyDeployment, rsa]}
+ ed25519:
+ description: Host ssh public key (ed25519)
+ value: {get_attr: [SshHostPubKeyDeployment, ed25519]}
diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml
new file mode 100644
index 00000000..2ebcb63c
--- /dev/null
+++ b/extraconfig/tasks/ssh/known_hosts_config.yaml
@@ -0,0 +1,36 @@
+heat_template_version: ocata
+description: 'SSH Known Hosts Config'
+
+parameters:
+ known_hosts:
+ type: string
+
+resources:
+
+ SSHKnownHostsConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: known_hosts
+ default: {get_param: known_hosts}
+ config: |
+ #!/bin/bash
+ set -eux
+ set -o pipefail
+
+ echo "Creating ssh known hosts file"
+
+ if [ ! -z "${known_hosts}" ]; then
+ echo "${known_hosts}"
+ echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+ chmod 0644 /etc/ssh/ssh_known_hosts
+ else
+ rm -f /etc/ssh/ssh_known_hosts
+ echo "No ssh known hosts"
+ fi
+
+outputs:
+ OS::stack_id:
+ description: The SSHKnownHostsConfig resource.
+ value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
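Each role template later in this diff exposes a known_hosts_entry output (comma-separated names and IPs for every network, followed by the host's ecdsa public key), and overcloud.j2.yaml joins those entries into the known_hosts string consumed here. Roughly, the file written above ends up with one line per node shaped like the following sketch; the hostnames, domain, IPs and key are invented and most networks are elided:

    #!/bin/bash
    # Illustration only: approximate shape of one aggregated known_hosts line
    cat >> ./ssh_known_hosts.example <<'EOF'
    192.168.24.15,overcloud-controller-0.localdomain,overcloud-controller-0,10.0.0.15,overcloud-controller-0.external.localdomain,overcloud-controller-0.external ecdsa-sha2-nistp256 AAAAE2VjZHNh...example
    EOF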
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
deleted file mode 100644
index d17f78ae..00000000
--- a/extraconfig/tasks/swift-ring-deploy.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingGetTempurl:
- default: ''
- description: A temporary Swift URL to download rings from.
- type: string
-
-resources:
- SwiftRingDeployConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_get_tempurl
- config: |
- #!/bin/sh
- pushd /
- curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
- popd
-
- SwiftRingDeploy:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingDeploy
- config: {get_resource: SwiftRingDeployConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
deleted file mode 100644
index 440c6883..00000000
--- a/extraconfig/tasks/swift-ring-update.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingPutTempurl:
- default: ''
- description: A temporary Swift URL to upload rings to.
- type: string
-
-resources:
- SwiftRingUpdateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_put_tempurl
- config: |
- #!/bin/sh
- TMP_DATA=$(mktemp -d)
- function cleanup {
- rm -Rf "$TMP_DATA"
- }
- trap cleanup EXIT
- # sanity check in case rings are not consistent within cluster
- swift-recon --md5 | grep -q "doesn't match" && exit 1
- pushd ${TMP_DATA}
- tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
- resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
- popd
- if [ "$resp" != "201" ]; then
- exit 1
- fi
-
- SwiftRingUpdate:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingUpdate
- config: {get_resource: SwiftRingUpdateConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
diff --git a/extraconfig/tasks/tripleo_upgrade_node.sh b/extraconfig/tasks/tripleo_upgrade_node.sh
index 24211ab0..a5a312dc 100644
--- a/extraconfig/tasks/tripleo_upgrade_node.sh
+++ b/extraconfig/tasks/tripleo_upgrade_node.sh
@@ -28,12 +28,15 @@ SCRIPT_NAME=$(basename $0)
$(declare -f log_debug)
$(declare -f manage_systemd_service)
$(declare -f systemctl_swift)
+$(declare -f special_case_ovs_upgrade_if_needed)
# pin nova messaging +-1 for the nova-compute service
if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
+special_case_ovs_upgrade_if_needed
+
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift stop
fi
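The $(declare -f ...) lines added above inline the named helper functions into the generated upgrade script, so the script shipped to the node carries its own copies and does not need to source anything. A tiny illustration of the pattern with a hypothetical helper:

    #!/bin/bash
    # Hypothetical helper standing in for log_debug and friends
    log_debug() { echo "DEBUG: $*" >&2; }

    # declare -f expands to the full function definition, so the generated
    # script contains its own copy of log_debug.
    cat > /tmp/node_upgrade_example.sh <<EOF
    #!/bin/bash
    $(declare -f log_debug)
    log_debug "running on the node"
    EOF
    bash /tmp/node_upgrade_example.sh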
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index f8ab10c2..f7e2769b 100755
--- a/extraconfig/tasks/yum_update.sh
+++ b/extraconfig/tasks/yum_update.sh
@@ -40,14 +40,29 @@ touch "$timestamp_file"
command_arguments=${command_arguments:-}
-list_updates=$(yum list updates)
-
-if [[ "$list_updates" == "" ]]; then
+# yum check-update exits 100 if updates are available
+set +e
+check_update=$(yum check-update 2>&1)
+check_update_exit=$?
+set -e
+
+if [[ "$check_update_exit" == "1" ]]; then
+ echo "Failed to check for package updates"
+ echo "$check_update"
+ exit 1
+elif [[ "$check_update_exit" != "100" ]]; then
echo "No packages require updating"
exit 0
fi
-pacemaker_status=$(systemctl is-active pacemaker || :)
+pacemaker_status=""
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+ pacemaker_status=$(systemctl is-active pacemaker)
+fi
+
+# TODO: FIXME: remove this in Pike.
+# Hack around mod_ssl update and puppet https://bugs.launchpad.net/tripleo/+bug/1682448
+touch /etc/httpd/conf.d/ssl.conf
# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
# and https://bugs.launchpad.net/tripleo/+bug/1634851
@@ -67,6 +82,9 @@ if [[ "$pacemaker_status" == "active" && \
fi
fi
+# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
+special_case_ovs_upgrade_if_needed
+
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Pacemaker running, stopping cluster node and doing full package update"
node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
@@ -94,17 +112,6 @@ return_code=$?
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
@@ -121,15 +128,19 @@ if [[ "$pacemaker_status" == "active" ]] ; then
fi
done
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo "ERROR galera sync timed out"
- exit 1
- fi
- done
+ RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
+
+ if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+ fi
echo "Waiting for pacemaker cluster to settle"
if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
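The rewritten check above leans on yum check-update's exit-code convention: 0 means nothing to update, 100 means updates are available, and 1 indicates an error. A minimal standalone version of that gate:

    #!/bin/bash
    # Minimal sketch of the new update gate in yum_update.sh
    set +e
    yum -q check-update >/dev/null 2>&1
    rc=$?
    set -e

    case "$rc" in
        0)   echo "No packages require updating" ;;
        100) echo "Updates available, proceeding with the update" ;;
        *)   echo "Failed to check for package updates (rc=$rc)"; exit 1 ;;
    esac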
diff --git a/net-config-linux-bridge.yaml b/net-config-linux-bridge.yaml
index 04664818..a544d547 100644
--- a/net-config-linux-bridge.yaml
+++ b/net-config-linux-bridge.yaml
@@ -33,7 +33,7 @@ parameters:
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
- default: 192.0.2.1
+ default: 192.168.24.1
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 8e86adf1..55bc2ece 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -5,15 +5,14 @@ resource_registry:
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+ OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+ OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
- OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
- OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
-
{% for role in roles %}
OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
@@ -69,8 +68,10 @@ resource_registry:
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
- OS::TripleO::Tasks::ControllerPrePuppet: OS::Heat::None
- OS::TripleO::Tasks::ControllerPostPuppet: OS::Heat::None
+{% for role in roles %}
+ OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
+ OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
+{% endfor %}
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
@@ -174,7 +175,7 @@ resource_registry:
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
- OS::TripleO::Services::Sshd: OS::Heat::None
+ OS::TripleO::Services::Sshd: puppet/services/sshd.yaml
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
@@ -237,6 +238,10 @@ resource_registry:
OS::TripleO::Services::Zaqar: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellPs: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellSc: OS::Heat::None
+ OS::TripleO::Services::CinderBackendNetApp: OS::Heat::None
+ OS::TripleO::Services::CinderBackendScaleIO: OS::Heat::None
OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
OS::TripleO::Services::Etcd: OS::Heat::None
OS::TripleO::Services::Ec2Api: OS::Heat::None
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index bbb5bae4..a36e4078 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -249,6 +249,16 @@ resources:
type: json
value: {get_attr: [EndpointMap, endpoint_map]}
+ SshKnownHostsConfig:
+ type: OS::TripleO::Ssh::KnownHostsConfig
+ properties:
+ known_hosts:
+ list_join:
+ - ''
+ {% for role in roles %}
+ - {get_attr: [{{role.name}}, known_hosts_entry]}
+ {% endfor %}
+
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
@@ -268,6 +278,13 @@ resources:
config: {get_attr: [hostsConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}SshKnownHostsDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ name: {{role.name}}SshKnownHostsDeployment
+ config: {get_resource: SshKnownHostsConfig}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
{{role.name}}AllNodesDeployment:
type: OS::Heat::StructuredDeployments
depends_on:
@@ -567,12 +584,24 @@ resources:
PingTestIps:
list_join:
- ' '
- - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
+ - - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, external_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, internal_api_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_mgmt_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, tenant_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 51f9abac..16fb4b90 100644
--- a/puppet/blockstorage-role.yaml
+++ b/puppet/blockstorage-role.yaml
@@ -457,6 +457,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: BlockStorageDeployment
+ properties:
+ server: {get_resource: BlockStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -504,6 +510,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [BlockStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index d7d7f478..4b022452 100644
--- a/puppet/cephstorage-role.yaml
+++ b/puppet/cephstorage-role.yaml
@@ -468,6 +468,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: CephStorageDeployment
+ properties:
+ server: {get_resource: CephStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -515,6 +521,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [CephStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index ebdd762d..37331f37 100644
--- a/puppet/compute-role.yaml
+++ b/puppet/compute-role.yaml
@@ -492,6 +492,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: NovaComputeDeployment
+ properties:
+ server: {get_resource: NovaCompute}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -559,7 +565,38 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [NovaCompute, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
- {get_resource: NovaCompute}
+ {get_resource: NovaCompute}
\ No newline at end of file
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 2f4f583c..68623e2d 100644
--- a/puppet/controller-role.yaml
+++ b/puppet/controller-role.yaml
@@ -467,7 +467,6 @@ resources:
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
@@ -532,6 +531,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: ControllerDeployment
+ properties:
+ server: {get_resource: Controller}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -599,6 +604,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [Controller, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
diff --git a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
index 533c0ee9..e3f4cce6 100644
--- a/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
@@ -27,6 +27,15 @@ resources:
mapped_data:
neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ # NOTE(aschultz): required for the puppet module but we don't
+ # actually want them defined on the compute nodes so we're
+ # relying on the puppet module's handling of <SERVICE DEFAULT>
+ # to just not set these but still accept that they were defined.
+ # This should be fixed in puppet-neutron and removed here,
+ # but for backportability, we need to define something.
+ neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
deleted file mode 100644
index 378f7f98..00000000
--- a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
-heat_template_version: ocata
-
-description: Configure hieradata for Cinder Netapp configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- CinderEnableNetappBackend:
- type: boolean
- default: true
- CinderNetappBackendName:
- type: string
- default: 'tripleo_netapp'
- CinderNetappLogin:
- type: string
- CinderNetappPassword:
- type: string
- hidden: true
- CinderNetappServerHostname:
- type: string
- CinderNetappServerPort:
- type: string
- default: '80'
- CinderNetappSizeMultiplier:
- type: string
- default: '1.2'
- CinderNetappStorageFamily:
- type: string
- default: 'ontap_cluster'
- CinderNetappStorageProtocol:
- type: string
- default: 'nfs'
- CinderNetappTransportType:
- type: string
- default: 'http'
- CinderNetappVfiler:
- type: string
- default: ''
- CinderNetappVolumeList:
- type: string
- default: ''
- CinderNetappVserver:
- type: string
- default: ''
- CinderNetappPartnerBackendName:
- type: string
- default: ''
- CinderNetappNfsShares:
- type: string
- default: ''
- CinderNetappNfsSharesConfig:
- type: string
- default: '/etc/cinder/shares.conf'
- CinderNetappNfsMountOptions:
- type: string
- default: ''
- CinderNetappCopyOffloadToolPath:
- type: string
- default: ''
- CinderNetappControllerIps:
- type: string
- default: ''
- CinderNetappSaPassword:
- type: string
- default: ''
- hidden: true
- CinderNetappStoragePools:
- type: string
- default: ''
- CinderNetappHostType:
- type: string
- default: ''
- CinderNetappWebservicePath:
- type: string
- default: '/devmgr/v2'
- # DEPRECATED options for compatibility with older versions
- CinderNetappEseriesHostType:
- type: string
- default: 'linux_dm_mp'
-
-parameter_groups:
-- label: deprecated
- description: Do not use deprecated params, they will be removed.
- parameters:
- - CinderNetappEseriesHostType
-
-resources:
- CinderNetappConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: hiera
- config:
- datafiles:
- cinder_netapp_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
- cinder::backend::netapp::title: {get_input: NetappBackendName}
- cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
- cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
- cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
- cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
- cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
- cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
- cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
- cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
- cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
- cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
- cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
- cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
- cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
- cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
- cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
- cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
- cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
- cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
- cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
- cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
- cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
-
- CinderNetappDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- name: CinderNetappDeployment
- config: {get_resource: CinderNetappConfig}
- server: {get_param: server}
- input_values:
- EnableNetappBackend: {get_param: CinderEnableNetappBackend}
- NetappBackendName: {get_param: CinderNetappBackendName}
- NetappLogin: {get_param: CinderNetappLogin}
- NetappPassword: {get_param: CinderNetappPassword}
- NetappServerHostname: {get_param: CinderNetappServerHostname}
- NetappServerPort: {get_param: CinderNetappServerPort}
- NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier}
- NetappStorageFamily: {get_param: CinderNetappStorageFamily}
- NetappStorageProtocol: {get_param: CinderNetappStorageProtocol}
- NetappTransportType: {get_param: CinderNetappTransportType}
- NetappVfiler: {get_param: CinderNetappVfiler}
- NetappVolumeList: {get_param: CinderNetappVolumeList}
- NetappVserver: {get_param: CinderNetappVserver}
- NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName}
- NetappNfsShares: {get_param: CinderNetappNfsShares}
- NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig}
- NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions}
- NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath}
- NetappControllerIps: {get_param: CinderNetappControllerIps}
- NetappSaPassword: {get_param: CinderNetappSaPassword}
- NetappStoragePools: {get_param: CinderNetappStoragePools}
- NetappHostType: {get_param: CinderNetappHostType}
- NetappWebservicePath: {get_param: CinderNetappWebservicePath}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [CinderNetappDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
index 1456337f..e7d0b830 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
@@ -6,6 +6,14 @@ parameters:
server:
description: ID of the controller node to apply this config to
type: string
+ NeutronBigswitchAgentEnabled:
+ description: The state of the neutron-bsn-agent service.
+ type: boolean
+ default: true
+ NeutronBigswitchLLDPEnabled:
+ description: The state of the neutron-bsn-lldp service.
+ type: boolean
+ default: false
NeutronBigswitchRestproxyServers:
description: 'Big Switch controllers ("IP:port,IP:port")'
type: string
@@ -43,6 +51,8 @@ resources:
datafiles:
neutron_bigswitch_data:
mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
@@ -58,6 +68,8 @@ resources:
config: {get_resource: NeutronBigswitchConfig}
server: {get_param: server}
input_values:
+ neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled}
+ neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled}
restproxy_servers: {get_param: NeutronBigswitchRestproxyServers}
restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth }
restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure}
diff --git a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
index bca6010a..40b407bc 100644
--- a/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
+++ b/puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
@@ -10,7 +10,7 @@ parameters:
# Config specific parameters, to be provided via parameter_defaults
N1000vVSMIP:
type: string
- default: '192.0.2.50'
+ default: '192.168.24.50'
N1000vVSMDomainID:
type: number
default: 100
@@ -62,7 +62,7 @@ parameters:
default: '255.255.255.0'
N1000vMgmtGatewayIP:
type: string
- default: '192.0.2.1'
+ default: '192.168.24.1'
N1000vPacemakerControl:
type: boolean
default: true
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index 6f2dd684..28092773 100644
--- a/puppet/major_upgrade_steps.j2.yaml
+++ b/puppet/major_upgrade_steps.j2.yaml
@@ -32,20 +32,6 @@ parameters:
type: string
hidden: true
-conditions:
- # Conditions to disable any steps where the task list is empty
-{%- for role in roles %}
- {{role.name}}UpgradeBatchConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
- - []
- {{role.name}}UpgradeConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
- - []
-{%- endfor %}
resources:
@@ -65,18 +51,22 @@ resources:
- " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
- - " systemctl restart openstack-nova-compute\n\n"
- - "fi\n\n"
+ - " crudini --set /etc/nova/nova.conf placement os_interface internal\n\n"
- str_replace:
template: |
crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
- crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
+ crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME'
crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
- ROLE='ROLE_NAME'
params:
SERVICE_PASSWORD: { get_param: NovaPassword }
REGION_NAME: { get_param: KeystoneRegion }
AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ - " systemctl restart openstack-nova-compute\n\n"
+ - "fi\n\n"
+ - str_replace:
+ template: |
+ ROLE='ROLE_NAME'
+ params:
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
- get_file: ../extraconfig/tasks/run_puppet.sh
@@ -100,12 +90,11 @@ resources:
{{role.name}}UpgradeBatchConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
{%- if step > 0 %}
- condition: {{role.name}}UpgradeBatchConfigEnabled
- {% if role.name in enabled_roles %}
+ {%- if role in enabled_roles %}
depends_on:
- {{role.name}}UpgradeBatch_Step{{step -1}}
{%- endif %}
- {% else %}
+ {%- else %}
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
{% if deliver_script.update({'deliver': True}) %} {% endif %}
{% endfor %}
@@ -125,13 +114,11 @@ resources:
{%- for role in enabled_roles %}
{{role.name}}UpgradeBatch_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
- condition: {{role.name}}UpgradeBatchConfigEnabled
{%- if step > 0 %}
depends_on:
- - {{role.name}}UpgradeBatch_Step{{step -1}}
- {% else %}
- depends_on:
- - {{role.name}}UpgradeBatchConfig_Step{{step}}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
{%- endif %}
update_policy:
batch_create:
@@ -185,11 +172,10 @@ resources:
# do, and there should be minimal performance hit (creating the
# config is cheap compared to the time to apply the deployment).
{%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
- {% if role.name in enabled_roles %}
+ {%- if role in enabled_roles %}
depends_on:
- {{role.name}}Upgrade_Step{{step -1}}
- {% endif %}
+ {%- endif %}
{%- endif %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
@@ -201,9 +187,18 @@ resources:
{{role.name}}Upgrade_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
{%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
+ # Make sure we wait until all roles have finished their own
+ # previous step before going to the next, so we can guarantee
+ # state for each step.
depends_on:
- - {{role.name}}Upgrade_Step{{step -1}}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- endfor %}
+ {%- else %}
+ depends_on:
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endfor %}
{%- endif %}
properties:
name: {{role.name}}Upgrade_Step{{step}}
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 6ee06d78..a329d13f 100644
--- a/puppet/objectstorage-role.yaml
+++ b/puppet/objectstorage-role.yaml
@@ -455,6 +455,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: SwiftStorageHieraDeploy
+ properties:
+ server: {get_resource: SwiftStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -502,6 +508,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
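For orientation, the known_hosts_entry output added above collapses to one
comma-separated known_hosts line per node, ending with the node's ecdsa host
key; a purely illustrative, abbreviated sketch of the resolved value
(hostnames, addresses and key are made up):

    known_hosts_entry:
      description: Entry for ssh known hosts
      value: "192.168.24.13,overcloud-objectstorage-0.localdomain,overcloud-objectstorage-0,172.16.1.13,overcloud-objectstorage-0.storage.localdomain,overcloud-objectstorage-0.storage <ecdsa host public key>"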
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 4eca2333..ed362e4e 100644
--- a/puppet/puppet-steps.j2
+++ b/puppet/puppet-steps.j2
@@ -23,27 +23,20 @@
properties:
StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
+ {{role.name}}PrePuppet:
+ type: OS::TripleO::Tasks::{{role.name}}PrePuppet
properties:
- servers: {get_param: [servers, Controller]}
+ servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingDeploy:
- type: OS::TripleO::Tasks::SwiftRingDeploy
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
# Step through a series of configuration steps
{% for step in range(1, 6) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
+ {% if step == 1 and role.name == 'Controller' %}
+ depends_on: [ControllerPrePuppet, {{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% elif step == 1 and role.name != 'Controller' %}
depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
{% else %}
depends_on:
@@ -82,25 +75,12 @@
properties:
servers: {get_param: [servers, {{role.name}}]}
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
+ {{role.name}}PostPuppet:
depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
+ - {{role.name}}ExtraConfigPost
+ type: OS::TripleO::Tasks::{{role.name}}PostPuppet
properties:
- servers: {get_param: [servers, Controller]}
+ servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingUpdate:
- type: OS::TripleO::Tasks::SwiftRingUpdate
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {% endfor %}
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
{% endfor %}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 1f68f41f..8f1f3142 100644
--- a/puppet/role.role.j2.yaml
+++ b/puppet/role.role.j2.yaml
@@ -489,6 +489,12 @@ resources:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: {{role}}Deployment
+ properties:
+ server: {get_resource: {{role}}}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
@@ -536,6 +542,37 @@ outputs:
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [{{role}}, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
diff --git a/puppet/services/ceilometer-base.yaml b/puppet/services/ceilometer-base.yaml
index 874c6893..7dfcacd1 100644
--- a/puppet/services/ceilometer-base.yaml
+++ b/puppet/services/ceilometer-base.yaml
@@ -37,7 +37,7 @@ parameters:
constraints:
- allowed_values: ['gnocchi', 'database']
CeilometerEventDispatcher:
- default: ['gnocchi']
+ default: ['panko', 'gnocchi']
description: Comma-separated list of Dispatchers to process events data
type: comma_delimited_list
constraints:
@@ -76,6 +76,11 @@ parameters:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ CeilometerApiEndpoint:
+ default: true
+ description: Whether to create or skip the API endpoint. Set this to
+ false if you choose to disable the Ceilometer API service.
+ type: boolean
outputs:
role_data:
@@ -123,12 +128,14 @@ outputs:
ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
service_config_settings:
keystone:
+ ceilometer_auth_enabled: true
ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
ceilometer::keystone::auth::tenant: 'service'
+ ceilometer::keystone::auth::configure_endpoint: {get_param: CeilometerApiEndpoint}
mysql:
ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::db::mysql::user: ceilometer
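A hedged example of wiring the new knob from an environment file when the
Ceilometer API itself is disabled (e.g. together with
environments/services/disable-ceilometer-api.yaml); only the parameter name
comes from the hunk above, the rest is illustrative:

    parameter_defaults:
      CeilometerApiEndpoint: false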
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 01531971..49856115 100644
--- a/puppet/services/ceph-rgw.yaml
+++ b/puppet/services/ceph-rgw.yaml
@@ -73,7 +73,7 @@ outputs:
ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
- ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
ceph::rgw::keystone::auth::tenant: service
ceph::rgw::keystone::auth::user: swift
ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
new file mode 100644
index 00000000..29a0ce1b
--- /dev/null
+++ b/puppet/services/cinder-backend-netapp.yaml
@@ -0,0 +1,129 @@
+heat_template_version: ocata
+
+description: OpenStack Cinder NetApp backend
+
+parameters:
+ CinderEnableNetappBackend:
+ type: boolean
+ default: true
+ CinderNetappBackendName:
+ type: string
+ default: 'tripleo_netapp'
+ CinderNetappLogin:
+ type: string
+ CinderNetappPassword:
+ type: string
+ hidden: true
+ CinderNetappServerHostname:
+ type: string
+ CinderNetappServerPort:
+ type: string
+ default: '80'
+ CinderNetappSizeMultiplier:
+ type: string
+ default: '1.2'
+ CinderNetappStorageFamily:
+ type: string
+ default: 'ontap_cluster'
+ CinderNetappStorageProtocol:
+ type: string
+ default: 'nfs'
+ CinderNetappTransportType:
+ type: string
+ default: 'http'
+ CinderNetappVfiler:
+ type: string
+ default: ''
+ CinderNetappVolumeList:
+ type: string
+ default: ''
+ CinderNetappVserver:
+ type: string
+ default: ''
+ CinderNetappPartnerBackendName:
+ type: string
+ default: ''
+ CinderNetappNfsShares:
+ type: string
+ default: ''
+ CinderNetappNfsSharesConfig:
+ type: string
+ default: '/etc/cinder/shares.conf'
+ CinderNetappNfsMountOptions:
+ type: string
+ default: ''
+ CinderNetappCopyOffloadToolPath:
+ type: string
+ default: ''
+ CinderNetappControllerIps:
+ type: string
+ default: ''
+ CinderNetappSaPassword:
+ type: string
+ default: ''
+ hidden: true
+ CinderNetappStoragePools:
+ type: string
+ default: ''
+ CinderNetappHostType:
+ type: string
+ default: ''
+ CinderNetappWebservicePath:
+ type: string
+ default: '/devmgr/v2'
+ # DEPRECATED options for compatibility with older versions
+ CinderNetappEseriesHostType:
+ type: string
+ default: 'linux_dm_mp'
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+ - CinderNetappEseriesHostType
+
+outputs:
+ role_data:
+ description: Role data for the Cinder NetApp backend.
+ value:
+ service_name: cinder_backend_netapp
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend}
+ cinder::backend::netapp::title: {get_param: CinderNetappBackendName}
+ cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin}
+ cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword}
+ cinder::backend::netapp::netapp_server_hostname: {get_param: CinderNetappServerHostname}
+ cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort}
+ cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier}
+ cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily}
+ cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol}
+ cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType}
+ cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler}
+ cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList}
+ cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver}
+ cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName}
+ cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares}
+ cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig}
+ cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions}
+ cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath}
+ cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps}
+ cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword}
+ cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
+ cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
+ cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
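A minimal, hypothetical parameter_defaults sketch for the new NetApp backend
service (all values are placeholders; in practice the backend is wired in via
a cinder-netapp environment file that also maps the resource registry entry):

    parameter_defaults:
      CinderEnableNetappBackend: true
      CinderNetappLogin: admin
      CinderNetappPassword: secrete
      CinderNetappServerHostname: netapp.example.com
      CinderNetappStorageFamily: ontap_cluster
      CinderNetappStorageProtocol: nfs
      CinderNetappNfsShares: '192.168.1.200:/cinder1,192.168.1.200:/cinder2'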
diff --git a/puppet/services/cinder-backend-scaleio.yaml b/puppet/services/cinder-backend-scaleio.yaml
index eb709cd5..c4e4aa3d 100644
--- a/puppet/services/cinder-backend-scaleio.yaml
+++ b/puppet/services/cinder-backend-scaleio.yaml
@@ -106,6 +106,6 @@ outputs:
cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
- cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+ cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
step_config: |
include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index b52955ef..26f1a96f 100644
--- a/puppet/services/cinder-volume.yaml
+++ b/puppet/services/cinder-volume.yaml
@@ -94,11 +94,7 @@ outputs:
tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
- tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers:
- str_replace:
- template: SERVERS
- params:
- SERVERS: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
diff --git a/puppet/services/congress.yaml b/puppet/services/congress.yaml
index 8bc9f2e3..1b7d698d 100644
--- a/puppet/services/congress.yaml
+++ b/puppet/services/congress.yaml
@@ -88,6 +88,7 @@ outputs:
service_config_settings:
keystone:
congress::keystone::auth::tenant: 'service'
+ congress::keystone::auth::region: {get_param: KeystoneRegion}
congress::keystone::auth::password: {get_param: CongressPassword}
congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]}
congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]}
diff --git a/puppet/services/database/mongodb.yaml b/puppet/services/database/mongodb.yaml
index 63ec4446..50597216 100644
--- a/puppet/services/database/mongodb.yaml
+++ b/puppet/services/database/mongodb.yaml
@@ -19,6 +19,10 @@ parameters:
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MongodbMemoryLimit:
+ default: '20G'
+ description: Limit the amount of memory mongodb uses with systemd.
+ type: string
MongoDbLoggingSource:
type: json
description: Fluentd logging configuration for mongodb.
@@ -49,6 +53,7 @@ outputs:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
- tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
+ tripleo::profile::base::database::mongodb::memory_limit: {get_param: MongodbMemoryLimit}
mongodb::server::service_manage: True
tripleo.mongodb.firewall_rules:
'101 mongodb_config':
diff --git a/puppet/services/database/mysql.yaml b/puppet/services/database/mysql.yaml
index 808f1353..7078b60f 100644
--- a/puppet/services/database/mysql.yaml
+++ b/puppet/services/database/mysql.yaml
@@ -23,6 +23,10 @@ parameters:
description: Configures MySQL max_connections config setting
type: number
default: 4096
+ MysqlIncreaseFileLimit:
+ description: Flag to increase MySQL open-files-limit to 16384
+ type: boolean
+ default: true
MysqlRootPassword:
type: string
hidden: true
@@ -96,6 +100,8 @@ outputs:
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client_bind_address:
{get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::generate_dropin_file_limit:
+ {get_param: MysqlIncreaseFileLimit}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
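The two database tuning knobs added above (MongodbMemoryLimit and
MysqlIncreaseFileLimit) can both be overridden from an environment file; a
sketch with illustrative values:

    parameter_defaults:
      MongodbMemoryLimit: '10G'        # systemd memory cap for mongod
      MysqlIncreaseFileLimit: false    # keep the packaged open-files-limit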
diff --git a/puppet/services/ec2-api.yaml b/puppet/services/ec2-api.yaml
index 70821396..f6cf13ff 100644
--- a/puppet/services/ec2-api.yaml
+++ b/puppet/services/ec2-api.yaml
@@ -91,6 +91,11 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/ec2_api'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ ec2api::api::keystone_ec2_tokens_url:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
-
if:
- nova_workers_zero
diff --git a/puppet/services/etcd.yaml b/puppet/services/etcd.yaml
index ec87a75a..d2a0e302 100644
--- a/puppet/services/etcd.yaml
+++ b/puppet/services/etcd.yaml
@@ -19,9 +19,9 @@ parameters:
via parameter_defaults in the resource registry.
type: json
EtcdInitialClusterToken:
- default: 'etcd-tripleo'
description: Initial cluster token for the etcd cluster during bootstrap.
type: string
+ hidden: true
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index f9547bef..56e1a90b 100644
--- a/puppet/services/ironic-conductor.yaml
+++ b/puppet/services/ironic-conductor.yaml
@@ -44,6 +44,10 @@ parameters:
default: 8088
description: Port to use for serving images when iPXE is used.
type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
MonitoringSubscriptionIronicConductor:
default: 'overcloud-ironic-conductor'
type: string
@@ -65,9 +69,7 @@ outputs:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # FIXME: I have no idea why neutron_url is in "api" manifest
- - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
@@ -104,7 +106,40 @@ outputs:
# the VIP, but rather a real IP of the host.
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+ # Credentials to access other services
+ ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::glance::username: 'ironic'
+ ironic::glance::password: {get_param: IronicPassword}
+ ironic::glance::project_name: 'service'
+ ironic::glance::user_domain_name: 'Default'
+ ironic::glance::project_domain_name: 'Default'
+ ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::neutron::username: 'ironic'
+ ironic::neutron::password: {get_param: IronicPassword}
+ ironic::neutron::project_name: 'service'
+ ironic::neutron::user_domain_name: 'Default'
+ ironic::neutron::project_domain_name: 'Default'
+ ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::service_catalog::username: 'ironic'
+ ironic::service_catalog::password: {get_param: IronicPassword}
+ ironic::service_catalog::project_name: 'service'
+ ironic::service_catalog::user_domain_name: 'Default'
+ ironic::service_catalog::project_domain_name: 'Default'
+ ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::swift::username: 'ironic'
+ ironic::swift::password: {get_param: IronicPassword}
+ ironic::swift::project_name: 'service'
+ ironic::swift::user_domain_name: 'Default'
+ ironic::swift::project_domain_name: 'Default'
+ # ironic-inspector support is not implemented, but let's configure
+ # the credentials for consistency.
+ ironic::drivers::inspector::enabled: false
+ ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::drivers::inspector::username: 'ironic'
+ ironic::drivers::inspector::password: {get_param: IronicPassword}
+ ironic::drivers::inspector::project_name: 'service'
+ ironic::drivers::inspector::user_domain_name: 'Default'
+ ironic::drivers::inspector::project_domain_name: 'Default'
step_config: |
include ::tripleo::profile::base::ironic::conductor
upgrade_tasks:
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index a85a9ed0..0b9f10ca 100644
--- a/puppet/services/keystone.yaml
+++ b/puppet/services/keystone.yaml
@@ -119,27 +119,27 @@ parameters:
Cron to purge expired tokens - Ensure
default: 'present'
KeystoneCronTokenFlushMinute:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Minute
default: '1'
KeystoneCronTokenFlushHour:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Hour
- default: '0'
+ default: '*'
KeystoneCronTokenFlushMonthday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month Day
default: '*'
KeystoneCronTokenFlushMonth:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month
default: '*'
KeystoneCronTokenFlushWeekday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Week Day
default: '*'
@@ -158,6 +158,16 @@ parameters:
description: >
Cron to purge expired tokens - User
default: 'keystone'
+ KeystoneLDAPDomainEnable:
+ description: Trigger to call ldap_backend puppet keystone define.
+ type: boolean
+ default: False
+ KeystoneLDAPBackendConfigs:
+ description: Hash containing the configurations for the LDAP backends
+ configured in keystone.
+ type: json
+ default: {}
+ hidden: true
resources:
@@ -171,6 +181,7 @@ resources:
conditions:
keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+ keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]}
outputs:
role_data:
@@ -226,6 +237,7 @@ outputs:
keystone::endpoint::internal_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
keystone::endpoint::region: {get_param: KeystoneRegion}
+ keystone::endpoint::version: ''
keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
keystone::rabbit_heartbeat_timeout_threshold: 60
keystone::cron::token_flush::maxdelay: 3600
@@ -292,6 +304,15 @@ outputs:
keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+ -
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ tripleo::profile::base::keystone::ldap_backend_enable: True
+ keystone::using_domain_config: True
+ tripleo::profile::base::keystone::ldap_backends_config:
+ get_param: KeystoneLDAPBackendConfigs
+ - {}
step_config: |
include ::tripleo::profile::base::keystone
@@ -304,6 +325,13 @@ outputs:
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ horizon:
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ horizon::keystone_multidomain_support: true
+ horizon::keystone_default_domain: 'Default'
+ - {}
# Ansible tasks to handle upgrade
upgrade_tasks:
- name: Stop keystone service (running under httpd)
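A hedged sketch of how the new LDAP knobs could be fed in via
parameter_defaults (see also environments/services/keystone_domain_specific_ldap_backend.yaml
in the diffstat); the domain name and option keys below are illustrative and
ultimately follow whatever the puppet ldap_backend define accepts:

    parameter_defaults:
      KeystoneLDAPDomainEnable: true
      KeystoneLDAPBackendConfigs:
        tripleodomain:
          url: ldap://ldap.example.com
          user: cn=Manager,dc=example,dc=com
          password: insecure
          suffix: dc=example,dc=com
          user_tree_dn: ou=Users,dc=example,dc=com
          user_objectclass: person
          user_id_attribute: cn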
diff --git a/puppet/services/metrics/collectd.yaml b/puppet/services/metrics/collectd.yaml
index 49b2d4c2..d2d9f3dc 100644
--- a/puppet/services/metrics/collectd.yaml
+++ b/puppet/services/metrics/collectd.yaml
@@ -70,7 +70,9 @@ parameters:
CollectdSecurityLevel:
type: string
description: >
- Security level setting for remote collectd connection.
+ Security level setting for remote collectd connection. If it is
+ set to Sign or Encrypt, the CollectdPassword and CollectdUsername
+ parameters need to be set.
default: 'None'
constraints:
- allowed_values:
diff --git a/puppet/services/monitoring/sensu-client.yaml b/puppet/services/monitoring/sensu-client.yaml
index aba2b1ed..4b5f36ac 100644
--- a/puppet/services/monitoring/sensu-client.yaml
+++ b/puppet/services/monitoring/sensu-client.yaml
@@ -81,4 +81,4 @@ outputs:
- name: Install sensu package if it was disabled
tags: step3
yum: name=sensu state=latest
- when: sensu_client.rc != 0
+ when: sensu_client_enabled.rc != 0
diff --git a/puppet/services/network/contrail-vrouter.yaml b/puppet/services/network/contrail-vrouter.yaml
index db9f0836..0cd1f829 100644
--- a/puppet/services/network/contrail-vrouter.yaml
+++ b/puppet/services/network/contrail-vrouter.yaml
@@ -27,7 +27,7 @@ parameters:
description: vRouter physical interface
type: string
ContrailVrouterGateway:
- default: '192.0.2.1'
+ default: '192.168.24.1'
description: vRouter default gateway
type: string
ContrailVrouterNetmask:
diff --git a/puppet/services/neutron-base.yaml b/puppet/services/neutron-base.yaml
index 43657bd9..5ed54f31 100644
--- a/puppet/services/neutron-base.yaml
+++ b/puppet/services/neutron-base.yaml
@@ -24,7 +24,7 @@ parameters:
type: number
NeutronDhcpAgentsPerNetwork:
type: number
- default: 3
+ default: 0
description: The number of neutron dhcp agents to schedule per network
NeutronCorePlugin:
default: 'ml2'
@@ -44,10 +44,10 @@ parameters:
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NeutronGlobalPhysnetMtu:
type: number
default: 1500
@@ -72,24 +72,31 @@ parameters:
via parameter_defaults in the resource registry.
type: json
+conditions:
+ dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]}
+
outputs:
role_data:
description: Role data for the Neutron base service.
value:
service_name: neutron_base
config_settings:
- neutron::rabbit_password: {get_param: RabbitPassword}
- neutron::rabbit_user: {get_param: RabbitUserName}
- neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- neutron::rabbit_port: {get_param: RabbitClientPort}
- neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
- neutron::core_plugin: {get_param: NeutronCorePlugin}
- neutron::service_plugins: {get_param: NeutronServicePlugins}
- neutron::debug: {get_param: Debug}
- neutron::purge_config: {get_param: EnableConfigPurge}
- neutron::allow_overlapping_ips: true
- neutron::rabbit_heartbeat_timeout_threshold: 60
- neutron::host: '%{::fqdn}'
- neutron::db::database_db_max_retries: -1
- neutron::db::database_max_retries: -1
- neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ map_merge:
+ - neutron::rabbit_password: {get_param: RabbitPassword}
+ neutron::rabbit_user: {get_param: RabbitUserName}
+ neutron::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ neutron::rabbit_port: {get_param: RabbitClientPort}
+ neutron::core_plugin: {get_param: NeutronCorePlugin}
+ neutron::service_plugins: {get_param: NeutronServicePlugins}
+ neutron::debug: {get_param: Debug}
+ neutron::purge_config: {get_param: EnableConfigPurge}
+ neutron::allow_overlapping_ips: true
+ neutron::rabbit_heartbeat_timeout_threshold: 60
+ neutron::host: '%{::fqdn}'
+ neutron::db::database_db_max_retries: -1
+ neutron::db::database_max_retries: -1
+ neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
+ - if:
+ - dhcp_agents_zero
+ - {}
+ - tripleo::profile::base::neutron::dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
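With the sentinel default of 0 the dhcp_agents_per_network hiera key is simply
omitted and deploy-time logic in puppet derives a value from the number of DHCP
agents being deployed; operators who still want a fixed count can set the
parameter explicitly, for example:

    parameter_defaults:
      NeutronDhcpAgentsPerNetwork: 2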
diff --git a/puppet/services/neutron-bigswitch-agent.yaml b/puppet/services/neutron-bigswitch-agent.yaml
new file mode 100644
index 00000000..845f0da0
--- /dev/null
+++ b/puppet/services/neutron-bigswitch-agent.yaml
@@ -0,0 +1,31 @@
+heat_template_version: ocata
+
+description: >
+ Installs bigswitch agent and enables the services
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+outputs:
+ role_data:
+ description: Configure the bigswitch agent services
+ value:
+ service_name: neutron_bigswitch_agent
+ step_config: |
+ if hiera('step') >= 4 {
+ include ::neutron::agents::bigswitch
+ }
diff --git a/puppet/services/neutron-ovs-agent.yaml b/puppet/services/neutron-ovs-agent.yaml
index 01471ba2..ef2485d4 100644
--- a/puppet/services/neutron-ovs-agent.yaml
+++ b/puppet/services/neutron-ovs-agent.yaml
@@ -82,6 +82,9 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the Neutron OVS agent service.
@@ -121,16 +124,22 @@ outputs:
step_config: |
include ::tripleo::profile::base::neutron::ovs
upgrade_tasks:
- - name: Check if neutron_ovs_agent is deployed
- command: systemctl is-enabled neutron-openvswitch-agent
- tags: common
- ignore_errors: True
- register: neutron_ovs_agent_enabled
- - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
- shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
- when: neutron_ovs_agent_enabled.rc == 0
- tags: step0,validation
- - name: Stop neutron_ovs_agent service
- tags: step1
- when: neutron_ovs_agent_enabled.rc == 0
- service: name=neutron-openvswitch-agent state=stopped
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ neutron_ovs_upgrade:
+ - name: Check if neutron_ovs_agent is deployed
+ command: systemctl is-enabled neutron-openvswitch-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_ovs_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_ovs_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step1
+ when: neutron_ovs_agent_enabled.rc == 0
+ service: name=neutron-openvswitch-agent state=stopped
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index e25bc495..80516fe6 100644
--- a/puppet/services/neutron-ovs-dpdk-agent.yaml
+++ b/puppet/services/neutron-ovs-dpdk-agent.yaml
@@ -62,6 +62,9 @@ resources:
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the Neutron OVS DPDK Agent service.
@@ -69,7 +72,10 @@ outputs:
service_name: neutron_ovs_dpdk_agent
config_settings:
map_merge:
- - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - map_replace:
+ - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - keys:
+ tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
@@ -79,3 +85,5 @@ outputs:
vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
+ upgrade_tasks:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
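The map_replace above only renames the firewall rules key so the DPDK agent
does not clash with the plain OVS agent hiera entry; conceptually (the rule
contents are illustrative):

    # input, from NeutronOvsAgent role_data
    tripleo.neutron_ovs_agent.firewall_rules:
      '118 neutron vxlan networks': {proto: udp, dport: 4789}
    # output, after map_replace with the keys mapping
    tripleo.neutron_ovs_dpdk_agent.firewall_rules:
      '118 neutron vxlan networks': {proto: udp, dport: 4789}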
diff --git a/puppet/services/neutron-plugin-ml2-odl.yaml b/puppet/services/neutron-plugin-ml2-odl.yaml
new file mode 100644
index 00000000..acacadfa
--- /dev/null
+++ b/puppet/services/neutron-plugin-ml2-odl.yaml
@@ -0,0 +1,45 @@
+heat_template_version: ocata
+
+description: >
+ OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OpenDaylightPortBindingController:
+ description: OpenDaylight port binding controller
+ type: string
+ default: 'network-topology'
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/ODL plugin.
+ value:
+ service_name: neutron_plugin_ml2_odl
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::opendaylight::port_binding_controller: {get_param: OpenDaylightPortBindingController}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
diff --git a/puppet/services/nova-api.yaml b/puppet/services/nova-api.yaml
index f27b53f2..6a597a88 100644
--- a/puppet/services/nova-api.yaml
+++ b/puppet/services/nova-api.yaml
@@ -218,14 +218,14 @@ outputs:
- name: Run puppet apply to set tranport_url in nova.conf
tags: step5
when: is_bootstrap_node
- command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
register: puppet_apply_nova_api_upgrade
failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
changed_when: puppet_apply_nova_api_upgrade.rc == 2
- name: Setup cell_v2 (map cell0)
tags: step5
when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell0
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- name: Setup cell_v2 (create default cell)
tags: step5
when: is_bootstrap_node
@@ -241,15 +241,15 @@ outputs:
command: nova-manage db sync
async: {get_param: NovaDbSyncTimeout}
poll: 10
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell_and_hosts
- name: Setup cell_v2 (get cell uuid)
tags: step5
when: is_bootstrap_node
shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- name: Setup cell_v2 (migrate instances)
tags: step5
when: is_bootstrap_node
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index ceacb0b2..7b568e9e 100644
--- a/puppet/services/nova-base.yaml
+++ b/puppet/services/nova-base.yaml
@@ -58,10 +58,10 @@ parameters:
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NovaIPv6:
default: false
description: Enable IPv6 features in Nova
@@ -151,6 +151,16 @@ outputs:
- {get_param: [EndpointMap, MysqlInternal, host]}
- '/nova'
- '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ nova::cell0_database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://nova:'
+ - {get_param: NovaPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/nova_cell0'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
nova::api_database_connection:
list_join:
- ''
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index d208bede..b1711436 100644
--- a/puppet/services/nova-compute.yaml
+++ b/puppet/services/nova-compute.yaml
@@ -52,7 +52,7 @@ parameters:
For different formats, refer to the nova.conf documentation for
pci_passthrough_whitelist configuration
type: json
- default: {}
+ default: ''
NovaVcpuPinSet:
description: >
A list or range of physical CPU cores to reserve for virtual machine
@@ -79,6 +79,13 @@ parameters:
type: string
description: Nova Compute upgrade level
default: auto
+ MigrationSshKey:
+ type: json
+ description: >
+ SSH key for migration.
+ Expects a dictionary with keys 'public_key' and 'private_key'.
+ Values should be identical to SSH public/private key files.
+ default: {}
resources:
NovaBase:
@@ -101,12 +108,17 @@ outputs:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
- nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
+ nova::compute::pci_passthrough:
+ str_replace:
+ template: "JSON_PARAM"
+ params:
+ JSON_PARAM: {get_param: NovaPCIPassthrough}
nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
tripleo::profile::base::nova::nova_compute_enabled: true
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
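Since the hiera value is now rendered through str_replace, NovaPCIPassthrough
is still provided as JSON in parameter_defaults and ends up as a string on the
node; a hypothetical example (device IDs and network name are placeholders):

    parameter_defaults:
      NovaPCIPassthrough:
        - vendor_id: '8086'
          product_id: '154d'
          physical_network: 'physnet_sriov'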
diff --git a/puppet/services/nova-ironic.yaml b/puppet/services/nova-ironic.yaml
index 5eb2170a..5fc77f07 100644
--- a/puppet/services/nova-ironic.yaml
+++ b/puppet/services/nova-ironic.yaml
@@ -51,3 +51,7 @@ outputs:
nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager'
step_config: |
include tripleo::profile::base::nova::compute::ironic
+ upgrade_tasks:
+ - name: Stop openstack-nova-compute service
+ tags: step1
+ service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index faf1ae48..b297424e 100644
--- a/puppet/services/nova-libvirt.yaml
+++ b/puppet/services/nova-libvirt.yaml
@@ -66,7 +66,6 @@ outputs:
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
- - 16509
- 16514
- '49152-49215'
- '5900-5999'
diff --git a/puppet/services/octavia-base.yaml b/puppet/services/octavia-base.yaml
index b537a2bc..f3aa1d64 100644
--- a/puppet/services/octavia-base.yaml
+++ b/puppet/services/octavia-base.yaml
@@ -24,10 +24,10 @@ parameters:
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
RabbitPassword:
description: The password for RabbitMQ
type: string
diff --git a/puppet/services/opendaylight-api.yaml b/puppet/services/opendaylight-api.yaml
index 51a520cc..e55cd2ee 100644
--- a/puppet/services/opendaylight-api.yaml
+++ b/puppet/services/opendaylight-api.yaml
@@ -17,6 +17,10 @@ parameters:
type: string
description: The password for the opendaylight server.
hidden: true
+ OpenDaylightConnectionProtocol:
+ description: L7 protocol used for REST access
+ type: string
+ default: 'http'
OpenDaylightEnableDHCP:
description: Knob to enable/disable ODL DHCP Server
type: boolean
@@ -55,6 +59,7 @@ outputs:
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
tripleo.opendaylight_api.firewall_rules:
'137 opendaylight api':
dport:
diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
index 3db0848e..ed572b4d 100644
--- a/puppet/services/opendaylight-ovs.yaml
+++ b/puppet/services/opendaylight-ovs.yaml
@@ -48,6 +48,10 @@ parameters:
default: {}
type: json
+resources:
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
+
outputs:
role_data:
description: Role data for the OpenDaylight service.
@@ -60,11 +64,7 @@ outputs:
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
tripleo.opendaylight_ovs.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
@@ -74,16 +74,22 @@ outputs:
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
- - name: Check if openvswitch is deployed
- command: systemctl is-enabled openvswitch
- tags: common
- ignore_errors: True
- register: openvswitch_enabled
- - name: "PreUpgrade step0,validation: Check service openvswitch is running"
- shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
- when: openvswitch_enabled.rc == 0
- tags: step0,validation
- - name: Stop openvswitch service
- tags: step1
- when: openvswitch_enabled.rc == 0
- service: name=openvswitch state=stopped
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ opendaylight_upgrade:
+ - name: Check if openvswitch is deployed
+ command: systemctl is-enabled openvswitch
+ tags: common
+ ignore_errors: True
+ register: openvswitch_enabled
+ - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+ shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+ when: openvswitch_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openvswitch service
+ tags: step1
+ when: openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
new file mode 100644
index 00000000..fea1ba96
--- /dev/null
+++ b/puppet/services/openvswitch-upgrade.yaml
@@ -0,0 +1,50 @@
+heat_template_version: ocata
+
+description: >
+ Openvswitch package special handling for upgrade.
+
+outputs:
+ role_data:
+ description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
+ value:
+ service_name: openvswitch_upgrade
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index 5be58c18..28fcbd6f 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -87,10 +87,16 @@ parameters:
\[(?<pid>[^ ]*)\]
(?<host>[^ ]*)
(?<message>.*)$/
+
+ EnableLoadBalancer:
+ default: true
+ description: Whether to deploy a LoadBalancer on the Controller
+ type: boolean
+
PacemakerResources:
type: comma_delimited_list
description: List of resources managed by pacemaker
- default: ['rabbitmq','haproxy']
+ default: ['rabbitmq', 'galera']
outputs:
role_data:
@@ -143,5 +149,13 @@ outputs:
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
+ pacemaker_is_active:
+ resource: "{{ item }}"
+ max_wait: 500
with_items: {get_param: PacemakerResources}
+ - name: Check pacemaker haproxy resource
+ tags: step4
+ pacemaker_is_active:
+ resource: haproxy
+ max_wait: 500
+ when: {get_param: EnableLoadBalancer}
diff --git a/puppet/services/sshd.yaml b/puppet/services/sshd.yaml
index 41e144a0..e09a8894 100644
--- a/puppet/services/sshd.yaml
+++ b/puppet/services/sshd.yaml
@@ -22,6 +22,33 @@ parameters:
default: ''
description: Configures Banner text in sshd_config
type: string
+ MessageOfTheDay:
+ default: ''
+ description: Configures /etc/motd text
+ type: string
+ SshServerOptions:
+ default:
+ HostKey:
+ - '/etc/ssh/ssh_host_rsa_key'
+ - '/etc/ssh/ssh_host_ecdsa_key'
+ - '/etc/ssh/ssh_host_ed25519_key'
+ SyslogFacility: 'AUTHPRIV'
+ AuthorizedKeysFile: '.ssh/authorized_keys'
+ PasswordAuthentication: 'no'
+ ChallengeResponseAuthentication: 'no'
+ GSSAPIAuthentication: 'yes'
+ GSSAPICleanupCredentials: 'no'
+ UsePAM: 'yes'
+ X11Forwarding: 'yes'
+ UsePrivilegeSeparation: 'sandbox'
+ AcceptEnv:
+ - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+ - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+ - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+ - 'XMODIFIERS'
+ Subsystem: 'sftp /usr/libexec/openssh/sftp-server'
+ description: Mapping of sshd_config values
+ type: json
outputs:
role_data:
@@ -29,6 +56,8 @@ outputs:
value:
service_name: sshd
config_settings:
- BannerText: {get_param: BannerText}
+ tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
+ tripleo::profile::base::sshd::motd: {get_param: MessageOfTheDay}
+ tripleo::profile::base::sshd::options: {get_param: SshServerOptions}
step_config: |
include ::tripleo::profile::base::sshd
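A hedged environment sketch combining the new sshd knobs (see also
environments/sshd-banner.yaml in the diffstat); the texts are placeholders,
and overriding SshServerOptions replaces the whole default map, so carry over
any defaults you still need:

    parameter_defaults:
      BannerText: |
        Authorized users only. Activity may be monitored and reported.
      MessageOfTheDay: |
        This node is managed by TripleO.
      SshServerOptions:
        PasswordAuthentication: 'no'
        X11Forwarding: 'no'
        Subsystem: 'sftp /usr/libexec/openssh/sftp-server'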
diff --git a/puppet/services/swift-ringbuilder.yaml b/puppet/services/swift-ringbuilder.yaml
index 2e3c818f..f62d5e18 100644
--- a/puppet/services/swift-ringbuilder.yaml
+++ b/puppet/services/swift-ringbuilder.yaml
@@ -42,6 +42,14 @@ parameters:
default: true
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
conditions:
swift_use_local_dir:
@@ -59,6 +67,8 @@ outputs:
value:
service_name: swift_ringbuilder
config_settings:
+ tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
+ tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild}
tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas}
tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower}
diff --git a/puppet/services/tacker.yaml b/puppet/services/tacker.yaml
index 6f92066e..2f803b0b 100644
--- a/puppet/services/tacker.yaml
+++ b/puppet/services/tacker.yaml
@@ -89,6 +89,7 @@ outputs:
service_config_settings:
keystone:
tacker::keystone::auth::tenant: 'service'
+ tacker::keystone::auth::region: {get_param: KeystoneRegion}
tacker::keystone::auth::password: {get_param: TackerPassword}
tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]}
tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]}
diff --git a/puppet/services/tripleo-firewall.yaml b/puppet/services/tripleo-firewall.yaml
index 67e14d9c..ff2b067f 100644
--- a/puppet/services/tripleo-firewall.yaml
+++ b/puppet/services/tripleo-firewall.yaml
@@ -37,3 +37,9 @@ outputs:
tripleo::firewall::purge_firewall_rules: {get_param: PurgeFirewallRules}
step_config: |
include ::tripleo::firewall
+ upgrade_tasks:
+ - name: blank ipv6 rule before activating ipv6 firewall.
+ tags: step3
+ shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; cat</dev/null>/etc/sysconfig/ip6tables
+ args:
+ creates: /etc/sysconfig/ip6tables.n-o-upgrade
diff --git a/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml b/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml
new file mode 100644
index 00000000..19452f27
--- /dev/null
+++ b/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Add capabilities to configure LDAP backends for keystone domains.
+ This can be done by using the KeystoneLDAPDomainEnable and
+ KeystoneLDAPBackendConfigs parameters.
diff --git a/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
new file mode 100644
index 00000000..ec22942a
--- /dev/null
+++ b/releasenotes/notes/allow-neutron-dhcp-agents-per-network-calculation-536c70391497256d.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ NeutronDhcpAgentsPerNetwork had a default value of 3 that, even though
+ unused in practice, was a bad default value. Changing the default value to a
+ sentinel value and making the hiera setting conditional allows deploy-time
+ logic in puppet to provide a default value based on the number of dhcp
+ agents being deployed.
diff --git a/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml b/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml
new file mode 100644
index 00000000..49ede200
--- /dev/null
+++ b/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Updated bigswitch environment file to include the bigswitch agent
+ installation and correct support for the restproxy configuration.
diff --git a/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml b/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml
new file mode 100644
index 00000000..298a8ece
--- /dev/null
+++ b/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - The initial firewall will now be purged by the deployed-server bootstrap
+ scripts. This is needed to prevent possible issues with bootstrapping the
+ initial Pacemaker cluster. See
+ https://bugs.launchpad.net/tripleo/+bug/1679234
diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
new file mode 100644
index 00000000..da995949
--- /dev/null
+++ b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
@@ -0,0 +1,6 @@
+---
+security:
+ - |
+ Secure EtcdInitialClusterToken by removing the default value
+ and making the parameter hidden.
+ Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
diff --git a/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
new file mode 100644
index 00000000..682171c1
--- /dev/null
+++ b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - Fixes an issue when using the CinderNfsServers
+ parameter_defaults setting. It now works using a
+ single share as well as a comma-separated list of
+ shares.
diff --git a/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
new file mode 100644
index 00000000..bb18aed8
--- /dev/null
+++ b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - Fixes firewall rules from the neutron OVS agent not being
+ inherited correctly and applied in the neutron OVS DPDK
+ template.
diff --git a/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
new file mode 100644
index 00000000..79cea05e
--- /dev/null
+++ b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Fixes OpenDaylightProviderMappings parsing of a
+ comma-delimited list.
diff --git a/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
new file mode 100644
index 00000000..d2b2eb94
--- /dev/null
+++ b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - openstack-selinux is now installed by the deployed-server
+ bootstrap scripts. Previously, it was not installed, so
+ if SELinux was set to enforcing, all OpenStack policy
+ was missing.
diff --git a/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml b/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml
new file mode 100644
index 00000000..d0624265
--- /dev/null
+++ b/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Since panko is enabled by default, include it in the default dispatcher
+ list for ceilometer events.
diff --git a/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
new file mode 100644
index 00000000..45ca9fe5
--- /dev/null
+++ b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Add support for cold migration over ssh, enabling nova cold migration.
+
+ This also switches to SSH as the default transport for live-migration.
+ The tripleo-common mistral action that generates passwords supplies the
+ MigrationSshKey parameter that enables this.
+deprecations:
+ - |
+ The TCP transport is no longer used for live-migration and the firewall
+ port has been closed.
diff --git a/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml b/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml
new file mode 100644
index 00000000..8c210823
--- /dev/null
+++ b/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml
@@ -0,0 +1,12 @@
+---
+issues:
+ - During the ovs upgrade from 2.5 to 2.6 we need to work around the classic
+ yum update command by handling the upgrade of the package separately, so as
+ not to lose the IPs and the connectivity on the nodes. The workaround is
+ discussed here https://bugs.launchpad.net/tripleo/+bug/1669714
+upgrade:
+ - The upgrade from openvswitch 2.5 to 2.6 is handled gracefully and there should
+ be no user impact, in particular no restart of the openvswitch service. For more
+ information please see the related bug above, which also links the relevant code reviews.
+ The workaround (transparent to the user, no input required) is to download the OVS
+ package and install it with the --nopostun and --notriggerun options provided by the rpm binary.
diff --git a/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml b/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml
new file mode 100644
index 00000000..09d3be03
--- /dev/null
+++ b/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml
@@ -0,0 +1,20 @@
+---
+upgrade:
+ - |
+    The default network for the ctlplane changed from 192.0.2.0/24 to
+    192.168.24.0/24. All references to the ctlplane network in the templates
+    have been updated to reflect this change. When upgrading from a previous
+    release, if the default network was used for the ctlplane (192.0.2.0/24),
+    then it is necessary to provide as input, via an environment file, the
+    correct settings for all the parameters that previously defaulted to
+    192.0.2.x and now default to 192.168.24.x. The environment file
+    `environments/updates/update-from-192_0_2-subnet.yaml` can be used on
+    upgrade to cover a simple scenario, but it is not enough for scenarios
+    using an external load balancer, Contrail or Cisco N1KV. The following
+    parameters need to be provided on upgrade:
+ From contrail-net.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From external-loadbalancer-vip-v6.yaml: ControlFixedIPs
+ From external-loadbalancer-vip.yaml: ControlFixedIPs
+ From network-environment.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From neutron-ml2-cisco-n1kv.yaml: N1000vVSMIP, N1000vMgmtGatewayIP
+ From contrail-vrouter.yaml: ContrailVrouterGateway
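
A hedged sketch of how the parameters listed above could be supplied on upgrade; the values shown are placeholders matching the old defaults, and only the parameters relevant to the environment files actually in use need to be set:

    # Hypothetical environment file pinning the pre-upgrade ctlplane addressing.
    cat > ctlplane-192_0_2-params.yaml <<'EOF'
    parameter_defaults:
      EC2MetadataIp: 192.0.2.1
      ControlPlaneDefaultRoute: 192.0.2.1
    EOF

    # Pass it, together with the provided update environment, to the upgrade
    # command, alongside any other environment files already in use.
    openstack overcloud deploy --templates \
      -e environments/updates/update-from-192_0_2-subnet.yaml \
      -e ctlplane-192_0_2-params.yaml
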
diff --git a/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml b/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml
new file mode 100644
index 00000000..86622bc1
--- /dev/null
+++ b/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Add knobs to limit the memory consumed by mongodb via systemd
diff --git a/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml b/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml
new file mode 100644
index 00000000..07407f20
--- /dev/null
+++ b/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - The ceilometer user is still needed even when the ceilometer API is
+    disabled, to ensure the other ceilometer services can still
+    authenticate with keystone.
diff --git a/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml b/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml
new file mode 100644
index 00000000..20146b0a
--- /dev/null
+++ b/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - The ``pci_passthrough`` hiera value should be passed as a string
+ (`bug 1675036 <https://bugs.launchpad.net/tripleo/+bug/1675036>`__).
diff --git a/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
new file mode 100644
index 00000000..8b533b1a
--- /dev/null
+++ b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - SSH host key exchange. The ssh host keys are collected from each host,
+ combined, and written to /etc/ssh/ssh_known_hosts.
diff --git a/releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml b/releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml
new file mode 100644
index 00000000..4cc01df8
--- /dev/null
+++ b/releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+    Added the ability to manage the MOTD banner.
+    Enabled the SSHD composable service by default. Puppet-ssh manages the sshd config.
diff --git a/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
new file mode 100644
index 00000000..70051f65
--- /dev/null
+++ b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - The token flush cron job has been modified to run hourly instead of once
+    a day. The daily run was causing issues with larger deployments, as the
+    operation would take too long and sometimes even fail because the
+    transaction was so large. Note that this only affects deployments using
+    the UUID token provider.
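
For clarity only, the operation being rescheduled is keystone's token flush; a rough equivalent of what the hourly cron job invokes (the real crontab entry is generated by the templates and may differ):

    # Flushing expired UUID tokens hourly keeps each run's database
    # transaction small enough to complete on large deployments.
    sudo -u keystone keystone-manage token_flush
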
diff --git a/requirements.txt b/requirements.txt
index 057aa287..cb3f96eb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.8 # Apache-2.0
+pbr<2.0.0,>=1.8 # Apache-2.0
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
diff --git a/roles_data.yaml b/roles_data.yaml
index 9e3b0a18..d3e08da5 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -42,6 +42,10 @@
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::CinderBackendDellPs
+ - OS::TripleO::Services::CinderBackendDellSc
+ - OS::TripleO::Services::CinderBackendNetApp
+ - OS::TripleO::Services::CinderBackendScaleIO
- OS::TripleO::Services::Congress
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
diff --git a/validation-scripts/all-nodes.sh b/validation-scripts/all-nodes.sh
index 0b8b3523..f1f4cc11 100644
--- a/validation-scripts/all-nodes.sh
+++ b/validation-scripts/all-nodes.sh
@@ -67,5 +67,23 @@ function ping_default_gateways() {
echo "SUCCESS"
}
+# Verify that the FQDN from the nova/ironic deployment matches the
+# FQDN in the heat templates.
+function fqdn_check() {
+ HOSTNAME=$(hostname)
+ SHORT_NAME=$(hostname -s)
+ FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts)
+ echo -n "Checking hostname vs /etc/hosts entry..."
+ if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
+ echo "FAILURE"
+ echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n"
+ exit 1
+ fi
+ echo "SUCCESS"
+}
+
ping_controller_ips "$ping_test_ips"
ping_default_gateways
+if [[ $validate_fqdn == "True" ]];then
+ fqdn_check
+fi
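
For reference, fqdn_check above reads /etc/hosts expecting entries of the form "<ip> <fqdn> <short name>"; a quick way to inspect what it will see, with placeholder values shown in the comment:

    # A passing entry looks like (address and hostname are examples only):
    #   192.168.24.10 overcloud-controller-0.localdomain overcloud-controller-0
    getent hosts "$(hostname -s)"
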