-rw-r--r--  ci/environments/multinode_major_upgrade.yaml | 7
-rwxr-xr-x  docker/docker-puppet.py | 63
-rw-r--r--  docker/post.j2.yaml | 2
-rw-r--r--  docker/services/keystone.yaml | 2
-rw-r--r--  puppet/services/kernel.yaml | 1
-rw-r--r--  puppet/services/pacemaker.yaml | 2
-rw-r--r--  puppet/services/sahara-engine.yaml | 3
-rw-r--r--  puppet/services/swift-proxy.yaml | 1
-rw-r--r--  releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml | 11
-rw-r--r--  releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml | 9
10 files changed, 76 insertions(+), 25 deletions(-)
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 6710fef7..af409835 100644
--- a/ci/environments/multinode_major_upgrade.yaml
+++ b/ci/environments/multinode_major_upgrade.yaml
@@ -37,6 +37,13 @@ parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index fe87ce7a..1bb8bc2b 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -23,6 +23,7 @@ import os
import subprocess
import sys
import tempfile
+import multiprocessing
# this is to match what we do in deployed-server
@@ -53,6 +54,8 @@ def rm_container(name):
print(cmd_stdout)
print(cmd_stderr)
+process_count = int(os.environ.get('PROCESS_COUNT',
+ multiprocessing.cpu_count()))
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
print('docker-puppet')
@@ -106,34 +109,25 @@ for service in (json_data or []):
print('Service compilation completed.\n')
-for config_volume in configs:
-
- service = configs[config_volume]
- puppet_tags = service[1] or ''
- manifest = service[2] or ''
- config_image = service[3] or ''
- volumes = service[4] if len(service) > 4 else []
-
- if puppet_tags:
- puppet_tags = "file,file_line,concat,%s" % puppet_tags
- else:
- puppet_tags = "file,file_line,concat"
+def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
print('---------')
print('config_volume %s' % config_volume)
print('puppet_tags %s' % puppet_tags)
print('manifest %s' % manifest)
print('config_image %s' % config_image)
+ print('volumes %s' % volumes)
hostname = short_hostname()
+ sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
- with open('/var/lib/docker-puppet/docker-puppet.sh', 'w') as script_file:
+ with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
- echo '{"step": 6}' > /etc/puppet/hieradata/docker.json
+ echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
TAGS=""
if [ -n "%(puppet_tags)s" ]; then
TAGS='--tags "%(puppet_tags)s"'
@@ -168,7 +162,8 @@ for config_volume in configs:
fi
""" % {'puppet_tags': puppet_tags, 'name': config_volume,
'hostname': hostname,
- 'no_archive': os.environ.get('NO_ARCHIVE', '')})
+ 'no_archive': os.environ.get('NO_ARCHIVE', ''),
+ 'step': os.environ.get('STEP', '6')})
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
@@ -186,12 +181,12 @@ for config_volume in configs:
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
- '--volume', '/var/lib/docker-puppet/docker-puppet.sh:/var/lib/docker-puppet/docker-puppet.sh:ro']
+ '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
dcmd.extend(['--volume', volume])
- dcmd.extend(['--entrypoint', '/var/lib/docker-puppet/docker-puppet.sh'])
+ dcmd.extend(['--entrypoint', sh_script])
env = {}
if os.environ.get('NET_HOST', 'false') == 'true':
@@ -207,6 +202,34 @@ for config_volume in configs:
print(cmd_stderr)
if subproc.returncode != 0:
print('Failed running docker-puppet.py for %s' % config_volume)
- sys.exit(subproc.returncode)
- else:
- rm_container('docker-puppet-%s' % config_volume)
+ rm_container('docker-puppet-%s' % config_volume)
+ return subproc.returncode
+
+# Holds all the information for each process to consume.
+# Instead of starting them all linearly we run them using a process
+# pool. This creates a list of arguments for the above function
+# to consume.
+process_map = []
+
+for config_volume in configs:
+
+ service = configs[config_volume]
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ if puppet_tags:
+ puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ else:
+ puppet_tags = "file,file_line,concat"
+
+ process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
+
+for p in process_map:
+ print '--\n%s' % p
+
+# Fire off processes to perform each configuration. Defaults
+# to the number of CPUs on the system.
+p = multiprocessing.Pool(process_count)
+p.map(mp_puppet_config, process_map)
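The new code above replaces the serial per-config_volume loop with a worker function and a multiprocessing.Pool sized by the PROCESS_COUNT environment variable (defaulting to the CPU count), fed from the process_map list of argument tuples. Below is a minimal standalone sketch of that pattern; run_config is only a placeholder for mp_puppet_config, and the two entries in the map (including the image names) are illustrative, not real service data.

    # Sketch only: the pool pattern used by docker-puppet.py, with a placeholder
    # worker instead of the real container invocation.
    import multiprocessing
    import os


    def run_config(args):
        # One tuple per config_volume, matching the process_map layout above.
        config_volume, puppet_tags, manifest, config_image, volumes = args
        print('configuring %s from image %s' % (config_volume, config_image))
        return 0  # stands in for the container's return code


    if __name__ == '__main__':
        # Illustrative entries only; the real list is built from the JSON config.
        process_map = [
            ['keystone', 'file,file_line,concat,keystone_config',
             'include ::tripleo::profile::base::keystone', 'example-keystone-image', []],
            ['nova-api', 'file,file_line,concat,nova_config',
             'include ::tripleo::profile::base::nova::api', 'example-nova-image', []],
        ]
        count = int(os.environ.get('PROCESS_COUNT', multiprocessing.cpu_count()))
        pool = multiprocessing.Pool(count)
        returncodes = pool.map(run_config, process_map)
        # The pool collects each worker's return code; checking them after the
        # map finishes is how a caller would notice a failed configuration run.
        if any(rc != 0 for rc in returncodes):
            raise SystemExit(1)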
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index 3473f4ca..e1154a62 100644
--- a/docker/post.j2.yaml
+++ b/docker/post.j2.yaml
@@ -68,6 +68,7 @@ resources:
- name: CONFIG
- name: NET_HOST
- name: NO_ARCHIVE
+ - name: STEP
{{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
type: OS::Heat::SoftwareDeployment
@@ -85,6 +86,7 @@ resources:
CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
NET_HOST: 'true'
NO_ARCHIVE: 'true'
+ STEP: {{step}}
{% endfor %}
# END primary_role_name docker-puppet-tasks
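For context, the STEP input added here carries the loop's {{step}} value into docker-puppet.py, which substitutes it into the docker.json hieradata in place of the previously hard-coded 6 (6 remains the default when STEP is unset). A tiny sketch of that substitution, outside the Heat/Jinja machinery and purely illustrative:

    # Sketch only: how a STEP environment variable with a default of '6' ends up
    # in the hieradata line written by the generated shell script.
    import os

    step = os.environ.get('STEP', '6')       # unset STEP keeps the old behaviour
    hieradata_line = '{"step": %s}' % step   # e.g. STEP=3 -> '{"step": 3}'
    print(hieradata_line)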
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 1d25da72..2bf8fa09 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -144,7 +144,7 @@ outputs:
[ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
docker_puppet_tasks:
# Keystone endpoint creation occurs only on single node
- step_4:
+ step_3:
- 'keystone_init_tasks'
- 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
- 'include ::tripleo::profile::base::keystone'
diff --git a/puppet/services/kernel.yaml b/puppet/services/kernel.yaml
index 29157959..fec455d1 100644
--- a/puppet/services/kernel.yaml
+++ b/puppet/services/kernel.yaml
@@ -31,6 +31,7 @@ outputs:
config_settings:
kernel_modules:
nf_conntrack: {}
+ ip_conntrack_proto_sctp: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index ca21cfbe..9398d6b5 100644
--- a/puppet/services/pacemaker.yaml
+++ b/puppet/services/pacemaker.yaml
@@ -143,5 +143,5 @@ outputs:
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=200
+ pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
with_items: {get_param: PacemakerResources}
diff --git a/puppet/services/sahara-engine.yaml b/puppet/services/sahara-engine.yaml
index 987fe25b..176514ec 100644
--- a/puppet/services/sahara-engine.yaml
+++ b/puppet/services/sahara-engine.yaml
@@ -53,6 +53,3 @@ outputs:
- name: Stop sahara_engine service
tags: step2
service: name=openstack-sahara-engine state=stopped
- - name: Sync sahara_engine DB
- tags: step5
- command: sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index 526fa888..c941b598 100644
--- a/puppet/services/swift-proxy.yaml
+++ b/puppet/services/swift-proxy.yaml
@@ -138,6 +138,7 @@ outputs:
- ''
- 'proxy-logging'
- 'proxy-server'
+ swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
diff --git a/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml
new file mode 100644
index 00000000..59f1fb99
--- /dev/null
+++ b/releasenotes/notes/manila-with-managed-ceph-e5178fd06127624f.yaml
@@ -0,0 +1,11 @@
+---
+prelude: >
+ Support for Manila/CephFS with a TripleO-managed Ceph cluster
+features:
+ - |
+ It is now possible to configure Manila with CephFS to use a
+ TripleO-managed Ceph cluster. When using the Heat environment
+ file at environments/manila-cephfsnative-config.yaml, Manila
+ will be configured to use the TripleO-managed Ceph cluster,
+ provided CephMDS is deployed as well, which can be done using
+ the file environments/services/ceph-mds.yaml
\ No newline at end of file
diff --git a/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
new file mode 100644
index 00000000..e9974a20
--- /dev/null
+++ b/releasenotes/notes/swift-ring-keeper-c04b440d7d5ce13f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Swift rings created or updated on the overcloud nodes are now
+ stored on the undercloud at the end of each deployment and
+ retrieved again before any deployment update, keeping the Swift
+ rings consistent across the cluster at all times. This makes it
+ possible to add, remove or replace nodes without manual operator
+ intervention.