Diffstat (limited to 'docker')
-rwxr-xr-x  docker/docker-puppet.py             72
-rwxr-xr-x  docker/docker-toool                189
-rw-r--r--  docker/post.j2.yaml                 20
-rw-r--r--  docker/services/keystone.yaml        2
-rw-r--r--  docker/services/neutron-api.yaml     3
5 files changed, 265 insertions, 21 deletions
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index fe87ce7a..86c8ec98 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -23,6 +23,7 @@ import os
import subprocess
import sys
import tempfile
+import multiprocessing
# this is to match what we do in deployed-server
@@ -45,6 +46,15 @@ def pull_image(name):
def rm_container(name):
+ if os.environ.get('SHOW_DIFF', None):
+ print('Diffing container: %s' % name)
+ subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ cmd_stdout, cmd_stderr = subproc.communicate()
+ print(cmd_stdout)
+ print(cmd_stderr)
+
print('Removing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
stdout=subprocess.PIPE,
@@ -53,6 +63,8 @@ def rm_container(name):
print(cmd_stdout)
print(cmd_stderr)
+process_count = int(os.environ.get('PROCESS_COUNT',
+ multiprocessing.cpu_count()))
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
print('docker-puppet')
@@ -106,34 +118,25 @@ for service in (json_data or []):
print('Service compilation completed.\n')
-for config_volume in configs:
-
- service = configs[config_volume]
- puppet_tags = service[1] or ''
- manifest = service[2] or ''
- config_image = service[3] or ''
- volumes = service[4] if len(service) > 4 else []
-
- if puppet_tags:
- puppet_tags = "file,file_line,concat,%s" % puppet_tags
- else:
- puppet_tags = "file,file_line,concat"
+def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
print('---------')
print('config_volume %s' % config_volume)
print('puppet_tags %s' % puppet_tags)
print('manifest %s' % manifest)
print('config_image %s' % config_image)
+ print('volumes %s' % volumes)
hostname = short_hostname()
+ sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
- with open('/var/lib/docker-puppet/docker-puppet.sh', 'w') as script_file:
+ with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
- echo '{"step": 6}' > /etc/puppet/hieradata/docker.json
+ echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
TAGS=""
if [ -n "%(puppet_tags)s" ]; then
TAGS='--tags "%(puppet_tags)s"'
@@ -168,7 +171,8 @@ for config_volume in configs:
fi
""" % {'puppet_tags': puppet_tags, 'name': config_volume,
'hostname': hostname,
- 'no_archive': os.environ.get('NO_ARCHIVE', '')})
+ 'no_archive': os.environ.get('NO_ARCHIVE', ''),
+ 'step': os.environ.get('STEP', '6')})
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
@@ -186,12 +190,12 @@ for config_volume in configs:
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
- '--volume', '/var/lib/docker-puppet/docker-puppet.sh:/var/lib/docker-puppet/docker-puppet.sh:ro']
+ '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
dcmd.extend(['--volume', volume])
- dcmd.extend(['--entrypoint', '/var/lib/docker-puppet/docker-puppet.sh'])
+ dcmd.extend(['--entrypoint', sh_script])
env = {}
if os.environ.get('NET_HOST', 'false') == 'true':
@@ -207,6 +211,34 @@ for config_volume in configs:
print(cmd_stderr)
if subproc.returncode != 0:
print('Failed running docker-puppet.py for %s' % config_volume)
- sys.exit(subproc.returncode)
- else:
- rm_container('docker-puppet-%s' % config_volume)
+ rm_container('docker-puppet-%s' % config_volume)
+ return subproc.returncode
+
+# Holds all the information each worker process needs. Instead of
+# starting the configurations one after another, we run them through a
+# process pool; the loop below builds the list of arguments consumed by
+# the function above.
+process_map = []
+
+for config_volume in configs:
+
+ service = configs[config_volume]
+ puppet_tags = service[1] or ''
+ manifest = service[2] or ''
+ config_image = service[3] or ''
+ volumes = service[4] if len(service) > 4 else []
+
+ if puppet_tags:
+ puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ else:
+ puppet_tags = "file,file_line,concat"
+
+ process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
+
+for p in process_map:
+ print '--\n%s' % p
+
+# Fire off processes to perform each configuration. Defaults
+# to the number of CPUs on the system.
+p = multiprocessing.Pool(process_count)
+p.map(mp_puppet_config, process_map)
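
A minimal, self-contained sketch of the fan-out pattern introduced above, using a stand-in worker in place of mp_puppet_config (which actually launches docker). The process_map entries and image names here are hypothetical; docker-puppet.py builds the real list from docker-puppet.json, but it honours PROCESS_COUNT the same way. Pool.map blocks until every worker returns, so all configurations finish before the script moves on.

import multiprocessing
import os

def fake_puppet_config(args):
    # Stand-in for mp_puppet_config: unpack the same five-element argument
    # list, but only print instead of running a docker container.
    config_volume, puppet_tags, manifest, config_image, volumes = args
    print('configuring %s from image %s' % (config_volume, config_image))
    return 0  # mp_puppet_config returns the container exit code

if __name__ == '__main__':
    process_count = int(os.environ.get('PROCESS_COUNT',
                                       multiprocessing.cpu_count()))
    # Hypothetical entries; the real list is built from docker-puppet.json.
    process_map = [
        ['keystone', 'file,file_line,concat,keystone_config',
         'include ::tripleo::profile::base::keystone',
         'example/keystone-image', []],
        ['neutron', 'file,file_line,concat,neutron_config',
         'include ::tripleo::profile::base::neutron',
         'example/neutron-image', []],
    ]
    pool = multiprocessing.Pool(process_count)
    print(pool.map(fake_puppet_config, process_map))
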
diff --git a/docker/docker-toool b/docker/docker-toool
new file mode 100755
index 00000000..36aba4a7
--- /dev/null
+++ b/docker/docker-toool
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import os
+import shutil
+import sys
+import json
+
+docker_cmd = '/bin/docker'
+
+# Tool to start docker containers as configured via
+# tripleo-heat-templates.
+#
+# This tool reads data from a json file generated from heat when the
+# TripleO stack is run. All the configuration data used to start the
+# containerized services is in this file.
+#
+# By default this tool lists all the containers that are started and
+# their start order.
+#
+# If you wish to see the command line used to start a given container,
+# specify it by name using the --container argument. --run can then be
+# used with this to actually execute docker to run the container.
+#
+# Other options listed allow you to modify this command line for
+# debugging purposes. For example:
+#
+# docker-toool -c swift-proxy -r -e /bin/bash -u root -i -n test
+#
+# will run the swift-proxy container as user root, executing /bin/bash,
+# named 'test', and will run interactively (i.e. with -ti).
+
+
+def parse_opts(argv):
+ parser = argparse.ArgumentParser("Tool to start docker containers via "
+ "TripleO configurations")
+ parser.add_argument('-f', '--config',
+ help="""File to use as docker startup configuration data.""",
+ default='/var/lib/docker-container-startup-configs.json')
+ parser.add_argument('-r', '--run',
+ action='store_true',
+ help="""Run the container as specified with --container.""",
+ default=False)
+ parser.add_argument('-e', '--command',
+ help="""Override the command used to run the container.""",
+ default='')
+ parser.add_argument('-c', '--container',
+ help="""Specify a container to run or show the command for.""",
+ default='')
+ parser.add_argument('-u', '--user',
+ help="""User to run container as.""",
+ default='')
+ parser.add_argument('-n', '--name',
+ help="""Name of container.""",
+ default='')
+ parser.add_argument('-i', '--interactive',
+ action='store_true',
+ help="""Start docker container interactively (-ti).""",
+ default=False)
+ opts = parser.parse_args(argv[1:])
+
+ return opts
+
+def docker_arg_map(key, value):
+ value = str(value).encode('ascii', 'ignore')
+ return {
+ 'environment': "--env=%s" % value,
+ # 'image': value,
+ 'net': "--net=%s" % value,
+ 'pid': "--pid=%s" % value,
+ 'privileged': "--privileged=%s" % value.lower(),
+ #'restart': "--restart=%s" % "false",
+ 'user': "--user=%s" % value,
+ 'volumes': "--volume=%s" % value,
+ 'volumes_from': "--volumes-from=%s" % value,
+ }.get(key, None)
+
+def run_docker_container(opts, container_name):
+ container_found = False
+
+ with open(opts.config) as f:
+ json_data = json.load(f)
+
+ for step in (json_data or []):
+ if step is None:
+ continue
+ for container in (json_data[step] or []):
+ if container == container_name:
+ print('container found: %s' % container)
+ container_found = True
+ # A few positional arguments:
+ command = ''
+ image = ''
+
+ cmd = [
+ docker_cmd,
+ 'run',
+ '--name',
+ opts.name or container
+ ]
+ for container_data in (json_data[step][container] or []):
+ if container_data == "environment":
+ for env in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("environment", env)
+ if arg:
+ cmd.append(arg)
+ elif container_data == "volumes":
+ for volume in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("volumes", volume)
+ if arg:
+ cmd.append(arg)
+ elif container_data == "volumes_from":
+ for volume in (json_data[step][container][container_data] or []):
+ arg = docker_arg_map("volumes_from", volume)
+ if arg:
+ cmd.append(arg)
+ elif container_data == 'command':
+ command = json_data[step][container][container_data]
+ elif container_data == 'image':
+ image = json_data[step][container][container_data]
+ else:
+ # Only add a restart if we're not interactive
+ if container_data == 'restart':
+ if opts.interactive:
+ continue
+ if container_data == 'user':
+ if opts.user:
+ continue
+ arg = docker_arg_map(container_data,
+ json_data[step][container][container_data])
+ if arg:
+ cmd.append(arg)
+
+ if opts.user:
+ cmd.append('--user')
+ cmd.append(opts.user)
+ if opts.interactive:
+ cmd.append('-ti')
+ # May as well remove it when we're done too
+ cmd.append('--rm')
+ cmd.append(image)
+ if opts.command:
+ cmd.append(opts.command)
+ elif command:
+ cmd.extend(command)
+
+ print ' '.join(cmd)
+
+ if opts.run:
+ os.execl(docker_cmd, *cmd)
+
+ if not container_found:
+ print("Container '%s' not found!" % container_name)
+
+def list_docker_containers(opts):
+ print opts
+ with open(opts.config) as f:
+ json_data = json.load(f)
+
+ for step in (json_data or []):
+ if step is None:
+ continue
+ print step
+ for container in (json_data[step] or []):
+ print('\tcontainer: %s' % container)
+ for container_data in (json_data[step][container] or []):
+ #print('\t\tcontainer_data: %s' % container_data)
+ if container_data == "start_order":
+ print('\t\tstart_order: %s' % json_data[step][container][container_data])
+
+opts = parse_opts(sys.argv)
+
+if opts.container:
+ run_docker_container(opts, opts.container)
+else:
+ list_docker_containers(opts)
+
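
For reference, a hypothetical, minimal excerpt of the startup-config file the tool reads, assembled from the keys docker-toool looks for (step -> container -> image/command/net/volumes/...); the real file is generated by heat and is much larger. Given such a file, 'docker-toool -c swift-proxy' would print the docker run command assembled from these keys, and adding --run would exec it.

# Hypothetical example of /var/lib/docker-container-startup-configs.json,
# expressed as a Python dict; all values here are illustrative only.
import json

example = {
    "step_4": {
        "swift-proxy": {
            "start_order": 2,
            "image": "example/swift-proxy-image",
            "net": "host",
            "privileged": False,
            "restart": "always",
            "user": "swift",
            "volumes": ["/var/lib/config-data/swift/etc/swift:/etc/swift:ro"],
            "environment": ["KOLLA_CONFIG_STRATEGY=COPY_ALWAYS"],
        }
    }
}

print(json.dumps(example, indent=2))
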
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index 3473f4ca..65d0c4ee 100644
--- a/docker/post.j2.yaml
+++ b/docker/post.j2.yaml
@@ -68,6 +68,7 @@ resources:
- name: CONFIG
- name: NET_HOST
- name: NO_ARCHIVE
+ - name: STEP
{{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
type: OS::Heat::SoftwareDeployment
@@ -85,6 +86,7 @@ resources:
CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
NET_HOST: 'true'
NO_ARCHIVE: 'true'
+ STEP: {{step}}
{% endfor %}
# END primary_role_name docker-puppet-tasks
@@ -187,6 +189,24 @@ resources:
docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
+ # Here we dump all of the docker container startup configuration data
+ # so that we can see how the containers are started outside of heat
+ # and docker-cmd. This lets us create command line tools to start and
+ # test these containers.
+ {{role.name}}DockerConfigJsonStartupData:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-container-startup-configs.json:
+ {get_attr: [{{role.name}}DockerConfig, value]}
+
+ {{role.name}}DockerConfigJsonStartupDataDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
+ servers: {get_param: [servers, {{role.name}}]}
+
{{role.name}}KollaJsonConfig:
type: OS::Heat::StructuredConfig
properties:
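
As a rough illustration of what the dumped file enables, a few lines of Python (essentially a condensed version of docker-toool's list mode) are enough to inspect the startup configs on a deployed node; this assumes the file exists at the path configured above.

import json

with open('/var/lib/docker-container-startup-configs.json') as f:
    startup_configs = json.load(f)

for step in sorted(startup_configs):
    for name, data in startup_configs[step].items():
        # start_order is optional per container; default to 0 if absent.
        print('%s %s (start_order: %s)'
              % (step, name, data.get('start_order', 0)))
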
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 1d25da72..2bf8fa09 100644
--- a/docker/services/keystone.yaml
+++ b/docker/services/keystone.yaml
@@ -144,7 +144,7 @@ outputs:
[ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
docker_puppet_tasks:
# Keystone endpoint creation occurs only on single node
- step_4:
+ step_3:
- 'keystone_init_tasks'
- 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
- 'include ::tripleo::profile::base::keystone'
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index e444f391..dfd1d5c0 100644
--- a/docker/services/neutron-api.yaml
+++ b/docker/services/neutron-api.yaml
@@ -81,6 +81,9 @@ outputs:
net: host
privileged: false
detach: false
+ # FIXME: we should make config file permissions right
+ # and run as neutron user
+ user: root
volumes:
- /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
- /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro