aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/ceilometer/charmhelpers/contrib
diff options
context:
space:
mode:
Diffstat (limited to 'charms/trusty/ceilometer/charmhelpers/contrib')
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py398
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py82
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py316
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py63
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py552
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml13
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema9
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml49
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py84
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py50
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py55
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py52
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py134
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py45
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py131
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py211
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf7
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs349
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules117
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally214
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py89
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py394
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config70
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config159
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py157
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py499
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py1012
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py1583
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh34
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py179
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py384
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf21
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg66
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy10
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py1576
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py269
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py145
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py1206
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py88
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py71
97 files changed, 13059 insertions, 0 deletions
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 0000000..2f24642
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,398 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import subprocess
+import pwd
+import grp
+import os
+import glob
+import shutil
+import re
+import shlex
+import yaml
+
+from charmhelpers.core.hookenv import (
+ config,
+ local_unit,
+ log,
+ relation_ids,
+ relation_set,
+ relations_of_type,
+)
+
+from charmhelpers.core.host import service
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+# provides:
+# (...)
+# nrpe-external-master:
+# interface: nrpe-external-master
+# scope: container
+#
+# and/or
+#
+# provides:
+# (...)
+# local-monitors:
+# interface: local-monitors
+# scope: container
+
+#
+# 2. Add the following to config.yaml
+#
+# nagios_context:
+# default: "juju"
+# type: string
+# description: |
+# Used by the nrpe subordinate charms.
+# A string that will be prepended to instance name to set the host name
+# in nagios. So for instance the hostname would be something like:
+# juju-myservice-0
+# If you're running multiple environments with the same services in them
+# this allows you to differentiate between them.
+# nagios_servicegroups:
+# default: ""
+# type: string
+# description: |
+# A comma-separated list of nagios servicegroups.
+# If left empty, the nagios_context will be used as the servicegroup
+#
+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
+#
+# 4. Update your hooks.py with something like this:
+#
+# from charmsupport.nrpe import NRPE
+# (...)
+# def update_nrpe_config():
+# nrpe_compat = NRPE()
+# nrpe_compat.add_check(
+# shortname = "myservice",
+# description = "Check MyService",
+# check_cmd = "check_http -w 2 -c 10 http://localhost"
+# )
+# nrpe_compat.add_check(
+# "myservice_other",
+# "Check for widget failures",
+# check_cmd = "/srv/myapp/scripts/widget_check"
+# )
+# nrpe_compat.write()
+#
+# def config_changed():
+# (...)
+# update_nrpe_config()
+#
+# def nrpe_external_master_relation_changed():
+# update_nrpe_config()
+#
+# def local_monitors_relation_changed():
+# update_nrpe_config()
+#
+# 5. ln -s hooks.py nrpe-external-master-relation-changed
+# ln -s hooks.py local-monitors-relation-changed
+
+
class CheckException(Exception):
    """Raised when an NRPE check is misconfigured (e.g. Check.__init__
    rejects a shortname that does not match Check.shortname_re)."""
    pass
+
+
class Check(object):
    """One NRPE check: an nrpe.d command definition plus the exported
    Nagios service definition that invokes it via check_nrpe."""

    # Shortnames become part of file names and Nagios object names.
    shortname_re = '[A-Za-z0-9-_]+$'
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use active-service
    host_name {nagios_hostname}
    service_description {nagios_hostname}[{shortname}] """
                        """{description}
    check_command check_nrpe!{command}
    servicegroups {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """Validate the shortname and resolve check_cmd to a full path.

        :param shortname: identifier for the check; must match shortname_re
        :param description: free-text description for the Nagios service
        :param check_cmd: plugin command line (name plus optional arguments)
        :raises CheckException: if shortname fails validation
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _get_check_filename(self):
        # nrpe.d config file that holds the command definition.
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        # Exported Nagios service definition for this host/check pair.
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname, self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin executable against the standard nagios
        plugin directories; return '' (and log) when it cannot be found."""
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    # Re-append any arguments after the resolved executable.
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        # Delete every exported service file for this command, regardless
        # of which hostname it was originally written for.
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's nrpe.d config and exported service files.

        Note: hostname is accepted for symmetry with write() but is not
        used — service files are matched by command name only.
        """
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe.d command file and, when the export directory
        is present, the exported Nagios service definition."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render service_template and write the exported service file,
        first removing any stale files for this command."""
        self._remove_service_files()

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the resolved check command.

        NOTE(review): check_cmd may contain arguments joined by spaces;
        subprocess.call() with a plain string and shell=False only works
        for an argument-free command — confirm intended usage.
        """
        subprocess.call(self.check_cmd)
+
+
class NRPE(object):
    """Collects Check objects, writes them out as NRPE/Nagios config,
    restarts the NRPE server and publishes the monitors on relations."""

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """Initialise from charm config.

        :param hostname: optional nagios hostname override; when omitted,
            the hostname advertised on the nrpe relation is used, falling
            back to "<nagios_context>-<unit name>".
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Default servicegroup to the nagios context when unset/empty.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Queue a check; takes the same arguments as Check.__init__."""
        self.checks.append(Check(*args, **kwargs))

    def remove_check(self, *args, **kwargs):
        """Remove a previously written check.

        :raises ValueError: if 'shortname' is not supplied
        """
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)

    def write(self):
        """Write all queued checks, restart NRPE and update relations."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # getpwnam/getgrnam raise KeyError when the user/group does not
            # exist.  (Was a bare `except:`, which would also have swallowed
            # unrelated errors such as KeyboardInterrupt.)
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """Return nagios_host_context from the nrpe subordinate relation.

    :param str relation_name: Name of relation nrpe sub joined to
    :returns: the first advertised nagios_host_context, or None
    """
    candidates = (rel['nagios_host_context']
                  for rel in relations_of_type(relation_name)
                  if 'nagios_host_context' in rel)
    return next(candidates, None)
+
+
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """Return nagios_hostname from the nrpe subordinate relation.

    :param str relation_name: Name of relation nrpe sub joined to
    :returns: the first advertised nagios_hostname, or None
    """
    candidates = (rel['nagios_hostname']
                  for rel in relations_of_type(relation_name)
                  if 'nagios_hostname' in rel)
    return next(candidates, None)
+
+
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """Return the nagios unit name, prefixed with the host context if the
    nrpe subordinate advertises one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
+
+
def add_init_service_checks(nrpe, services, unit_name):
    """Add a process check for each service in ``services``.

    Upstart-managed services get a check_upstart_job check; SysV services
    get a cron job that writes a status file read by check_status_file.py.

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            # Don't add a check for these services from neutron-gateway
            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
                nrpe.add_check(
                    shortname=svc,
                    description='process check {%s}' % unit_name,
                    check_cmd='check_upstart_job %s' % svc
                )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # Context manager guarantees the cron file is closed even if
            # the write fails (was a bare open/write/close sequence).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
+
+
def copy_nrpe_checks():
    """Copy the charm's bundled nrpe check scripts into the local nagios
    plugins directory, creating that directory if necessary."""
    nagios_plugins = '/usr/local/lib/nagios/plugins'
    src_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                           'charmhelpers', 'contrib', 'openstack',
                           'files')

    if not os.path.exists(nagios_plugins):
        os.makedirs(nagios_plugins)
    for src in glob.glob(os.path.join(src_dir, "check_*")):
        if not os.path.isfile(src):
            continue
        dest = os.path.join(nagios_plugins, os.path.basename(src))
        shutil.copy2(src, dest)
+
+
def add_haproxy_checks(nrpe, unit_name):
    """Register the standard pair of HAProxy NRPE checks.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    haproxy_checks = (
        ('haproxy_servers',
         'Check HAProxy {%s}' % unit_name,
         'check_haproxy.sh'),
        ('haproxy_queue',
         'Check HAProxy queue depth {%s}' % unit_name,
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=description,
                       check_cmd=check_cmd)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000..320961b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,175 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+ volume-ephemeral:
+ type: boolean
+ default: true
+ description: >
+ If false, a volume is mounted as specified in "volume-map"
+ If true, ephemeral storage will be used, meaning that log data
+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
+ volume-map:
+ type: string
+ default: {}
+ description: >
+ YAML map of units to device names, e.g:
+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+ Service units will raise a configure-error if volume-ephemeral
+ is 'true' and no volume-map value is set. Use 'juju set' to set a
+ value and 'juju resolved' to complete configuration.
+
+Usage::
+
+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
+ from charmsupport.hookenv import log, ERROR
+ def pre_mount_hook():
+ stop_service('myservice')
+ def post_mount_hook():
+ start_service('myservice')
+
+ if __name__ == '__main__':
+ try:
+ configure_volume(before_change=pre_mount_hook,
+ after_change=post_mount_hook)
+ except VolumeConfigurationError:
+ log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
class VolumeConfigurationError(Exception):
    '''Volume configuration data is missing or invalid.

    Raised by get_config/mount_volume/unmount_volume/configure_volume.
    '''
    pass
+
+
def get_config():
    '''Gather and sanity-check volume configuration data.

    :returns: dict with keys 'ephemeral', 'device' and 'mountpoint', or
        None when the configuration contains errors.
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # Pre-bind volume_map so a YAML parse failure below cannot leave it
    # unbound (the original code raised NameError in that case).
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)), hookenv.ERROR)
        errors = True
        # Fall back to an empty map so the .get() below cannot crash on a
        # non-dict value (e.g. a YAML list or scalar).
        volume_map = {}

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for persistent storage but did not define volume ID
        # (message previously said "Ephemeral storage was requested",
        # which described the opposite condition)
        hookenv.log('Persistent storage was requested, but there is no '
                    'volume defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
+
+
def mount_volume(config):
    '''Mount the configured device at its mount point (persisted to fstab),
    remounting if something else is already mounted there.

    :raises VolumeConfigurationError: if the mount point is not a directory
        or the mount fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.exists(mountpoint):
        host.mkdir(mountpoint)
    elif not os.path.isdir(mountpoint):
        hookenv.log('Not a directory: {}'.format(mountpoint))
        raise VolumeConfigurationError()
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    if not host.mount(config['device'], mountpoint, persist=True):
        raise VolumeConfigurationError()
+
+
def unmount_volume(config):
    '''Unmount the configured mount point if it is currently mounted.

    :raises VolumeConfigurationError: if the unmount fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
+
+
def managed_mounts():
    '''List of all mounted managed volumes.

    Filters host.mounts() down to entries whose mount point lives under
    MOUNT_BASE.  NOTE(review): on Python 3 this returns a lazy filter
    object rather than a list; the only caller here feeds it to dict(),
    which accepts either.
    '''
    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume
    configuration.

    Returns the mount point, or the string "ephemeral".  The optional
    before_change/after_change callables are invoked around any change to
    the mounted volumes.

    :raises VolumeConfigurationError: if the configuration cannot be read.
    '''
    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    mountpoint = config['mountpoint']

    if config['ephemeral']:
        # Ephemeral storage: make sure nothing stays mounted here.
        if os.path.ismount(mountpoint):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'

    # Persistent storage: mount if absent ...
    if not os.path.ismount(mountpoint):
        before_change()
        mount_volume(config)
        after_change()
        return mountpoint

    # ... or remount only when the mounted device differs from the
    # configured one.
    current = dict(managed_mounts())
    if current.get(mountpoint) != config['device']:
        before_change()
        unmount_volume(config)
        mount_volume(config)
        after_change()
    return mountpoint
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
new file mode 100644
index 0000000..0091719
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
@@ -0,0 +1,82 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+# James Page <james.page@ubuntu.com>
+# Adam Gandelman <adamg@ubuntu.com>
+#
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+ config as config_get,
+ relation_get,
+ relation_ids,
+ related_units as relation_list,
+ log,
+ INFO,
+)
+
+
def get_cert(cn=None):
    """Return an (ssl_cert, ssl_key) pair, preferring charm config and
    falling back to the identity-service relation.

    :param cn: optional CN; when given, per-CN relation keys are used.
    """
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)

    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    if cn:
        ssl_cert_attr = 'ssl_cert_{}'.format(cn)
        ssl_key_attr = 'ssl_key_{}'.format(cn)
    else:
        ssl_cert_attr = 'ssl_cert'
        ssl_key_attr = 'ssl_key'
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Keep the first truthy value found for each of cert and key.
            cert = cert or relation_get(ssl_cert_attr, rid=r_id, unit=unit)
            key = key or relation_get(ssl_key_attr, rid=r_id, unit=unit)
    return (cert, key)
+
+
def get_ca_cert():
    """Return the CA certificate from charm config, falling back to the
    identity-service relation when unset."""
    ca_cert = config_get('ssl_ca')
    if ca_cert is not None:
        return ca_cert

    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if ca_cert is None:
                ca_cert = relation_get('ca_cert', rid=r_id, unit=unit)
    return ca_cert
+
+
def install_ca_cert(ca_cert):
    """Install the given CA certificate into the system trust store and
    refresh the certificate bundle; no-op for a falsy argument."""
    if not ca_cert:
        return
    cert_path = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
    with open(cert_path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
new file mode 100644
index 0000000..aa0b515
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
@@ -0,0 +1,316 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# James Page <james.page@ubuntu.com>
+# Adam Gandelman <adamg@ubuntu.com>
+#
+
+"""
+Helpers for clustering and determining "cluster leadership" and other
+clustering-related helpers.
+"""
+
+import subprocess
+import os
+
+from socket import gethostname as get_unit_hostname
+
+import six
+
+from charmhelpers.core.hookenv import (
+ log,
+ relation_ids,
+ related_units as relation_list,
+ relation_get,
+ config as config_get,
+ INFO,
+ ERROR,
+ WARNING,
+ unit_get,
+ is_leader as juju_is_leader
+)
+from charmhelpers.core.decorators import (
+ retry_on_exception,
+)
+from charmhelpers.core.strutils import (
+ bool_from_string,
+)
+
+DC_RESOURCE_NAME = 'DC'
+
+
class HAIncompleteConfig(Exception):
    """Raised when charm config lacks settings required for hacluster."""
    pass


class CRMResourceNotFound(Exception):
    """Raised when a queried CRM resource is not found or not running."""
    pass


class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller cannot be queried."""
    pass
+
+
def is_elected_leader(resource):
    """Return True if the unit executing this is the elected cluster leader.

    Leadership is determined, in order of preference, by:
    1. Juju's native leadership election, when the juju version supports it.
    2. Corosync/CRM leadership for *resource*, when clustered via 'ha'.
    3. Otherwise, the alive unit with the lowest unit number (the oldest
       surviving unit).
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
+
+
def is_clustered():
    """Return True if any unit on any 'ha' relation reports 'clustered'."""
    return any(
        relation_get('clustered', rid=rid, unit=member)
        for rid in (relation_ids('ha') or [])
        for member in (relation_list(rid) or []))
+
+
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller

    :returns: True if this host is the current DC, False otherwise.
    :raises CRMDCNotFound: if 'crm status' fails or no DC has been
                           elected yet ('Current DC: NONE').
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # Python 3 returns bytes; normalise to text before parsing.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            # NOTE(review): assumes a hostname follows the colon; a bare
            # 'Current DC:' line would raise IndexError — confirm crm
            # output always includes a value here.
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        # Election not complete yet; callers wrapped in retry_on_exception
        # will retry on this exception.
        raise CRMDCNotFound('Current DC: NONE')

    return False
+
+
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.

    NOTE: the 'retry' parameter is accepted but unused here; retries are
    driven by the retry_on_exception decorator above — presumably kept
    for API compatibility (TODO confirm).
    """
    # The special 'DC' resource is resolved via pacemaker's Designated
    # Controller election rather than a crm resource query.
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # Python 3 returns bytes; normalise to text before searching.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        # Treat a failed query as "not leader" below.
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        # Raising (rather than returning False) triggers the decorator's
        # retry to avoid a false negative during failover.
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
+
+
def is_leader(resource):
    """Deprecated wrapper around is_crm_leader()."""
    msg = ("is_leader is deprecated. Please consider using is_crm_leader "
           "instead.")
    log(msg, level=WARNING)
    return is_crm_leader(resource)
+
+
def peer_units(peer_relation="cluster"):
    """Return a list of all units in the given peer relation."""
    return [member
            for rid in (relation_ids(peer_relation) or [])
            for member in (relation_list(rid) or [])]
+
+
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {member: relation_get(addr_key, rid=rid, unit=member)
            for rid in relation_ids(peer_relation)
            for member in relation_list(rid)}
+
+
def oldest_peer(peers):
    """Return True if this unit's number is the lowest among all peers.

    Unit names look like 'service/N'; the unit with the smallest N is
    considered the oldest surviving unit.
    """
    local_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_no for peer in peers)
+
+
def eligible_leader(resource):
    """Deprecated wrapper around is_elected_leader()."""
    msg = ("eligible_leader is deprecated. Please consider using "
           "is_elected_leader instead.")
    log(msg, level=WARNING)
    return is_elected_leader(resource)
+
+
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    flag = config_get('use-https')
    if flag and bool_from_string(flag):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for rid in relation_ids('identity-service'):
        for member in relation_list(rid):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=rid, unit=member),
                relation_get('ca_cert', rid=rid, unit=member),
            ]
            # NOTE: works around (LP: #1203241) — both values must be
            # present and non-empty.
            if all(v is not None and v != '' for v in rel_state):
                return True
    return False
+
+
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    # One 10-port step for haproxy (or forced in single-node mode), plus
    # one more when an HTTPS reverse proxy sits in front.
    offset = 0
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        offset += 1
    if https():
        offset += 1
    return public_port - (offset * 10)
+
+
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    # Step down by 10 when haproxy fronts this service (or in forced
    # single-node mode).
    offset = 0
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        offset += 1
    return public_port - (offset * 10)
+
+
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

    ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)
    # Idiom fix: build the list directly instead of the original
    # side-effecting list comprehension ([missing.append(s) for ...]).
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
+
+
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config tempating object to inspect for
               a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        # Clustered: publish the VIP rather than this unit's address.
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 0000000..91280c0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+ (from contrib.hardening.harden) to any functions or methods you want to use
+ to trigger hardening of your application/system.
+
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+ a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+ called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at root level and then any settings
+ with overrides e.g.
+
+ os:
+ general:
+ desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time the
+ hook runs.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 0000000..a133532
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 0000000..277b8c7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 0000000..d130479
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+)
+from charmhelpers.contrib.hardening.apache.checks import config
+
+
def run_apache_checks():
    """Run every configured Apache hardening audit, logging progress."""
    log("Starting Apache hardening checks.", level=DEBUG)
    for audit in config.get_audits():
        log("Running '%s' check" % (audit.__class__.__name__), level=DEBUG)
        audit.ensure_compliance()

    log("Apache hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
new file mode 100644
index 0000000..8249ca0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+import subprocess
+
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+ FilePermissionAudit,
+ DirectoryPermissionAudit,
+ NoReadWriteForOther,
+ TemplatedFile,
+)
+from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
+from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
def get_audits():
    """Get Apache hardening config audits.

    :returns: list of audits (empty when apache2 is not installed)
    """
    # Skip entirely when apache2 is not installed on this unit.
    if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
        log("Apache server does not appear to be installed on this node - "
            "skipping apache hardening", level=INFO)
        return []

    context = ApacheConfContext()
    settings = utils.get_settings('apache')
    audits = [
        # Main apache config: root-owned, group-readable only.
        FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
                            group='root', mode=0o0640),

        # NOTE(review): 0o0755 (executable bits) is unusual for a config
        # file — confirm the intended mode for alias.conf.
        TemplatedFile(os.path.join(settings['common']['apache_dir'],
                                   'mods-available/alias.conf'),
                      context,
                      TEMPLATES_DIR,
                      mode=0o0755,
                      user='root',
                      service_actions=[{'service': 'apache2',
                                        'actions': ['restart']}]),

        # Render the hardening overrides; restart apache when it changes.
        TemplatedFile(os.path.join(settings['common']['apache_dir'],
                                   'conf-enabled/hardening.conf'),
                      context,
                      TEMPLATES_DIR,
                      mode=0o0640,
                      user='root',
                      service_actions=[{'service': 'apache2',
                                        'actions': ['restart']}]),

        # NOTE(review): directories usually need the execute bit to be
        # traversable; 0o640 would make apache_dir unbrowsable — confirm
        # against the hardening profile.
        DirectoryPermissionAudit(settings['common']['apache_dir'],
                                 user='root',
                                 group='root',
                                 mode=0o640),

        # Disable any apache modules listed in the hardening settings.
        DisabledModuleAudit(settings['hardening']['modules_to_disable']),

        NoReadWriteForOther(settings['common']['apache_dir']),
    ]

    return audits
+
+
class ApacheConfContext(object):
    """Defines the set of key/value pairs to set in a apache config file.

    This context, when called, will return a dictionary containing the
    key/value pairs of setting to specify in the
    /etc/apache/conf-enabled/hardening.conf file.
    """
    def __call__(self):
        settings = utils.get_settings('apache')
        ctxt = settings['hardening']

        out = subprocess.check_output(['apache2', '-v'])
        # Bug fix: under Python 3 check_output returns bytes and
        # re.search with a str pattern would raise TypeError; decode
        # first (a no-op effect on Python 2 byte strings).
        if not isinstance(out, str):
            out = out.decode('utf-8')
        ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
                                           out).group(1)
        ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
        ctxt['traceenable'] = settings['hardening']['traceenable']
        return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
new file mode 100644
index 0000000..e46a58a
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
@@ -0,0 +1,31 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+<IfModule alias_module>
+ #
+ # Aliases: Add here as many aliases as you need (with no limit). The format is
+ # Alias fakename realname
+ #
+ # Note that if you include a trailing / on fakename then the server will
+ # require it to be present in the URL. So "/icons" isn't aliased in this
+ # example, only "/icons/". If the fakename is slash-terminated, then the
+ # realname must also be slash terminated, and if the fakename omits the
+ # trailing slash, the realname must also omit it.
+ #
+ # We include the /icons/ alias for FancyIndexed directory listings. If
+ # you do not use FancyIndexing, you may comment this out.
+ #
+ Alias /icons/ "{{ apache_icondir }}/"
+
+ <Directory "{{ apache_icondir }}">
+ Options -Indexes -MultiViews -FollowSymLinks
+ AllowOverride None
+{% if apache_version == '2.4' -%}
+ Require all granted
+{% else -%}
+ Order allow,deny
+ Allow from all
+{% endif %}
+ </Directory>
+</IfModule>
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
new file mode 100644
index 0000000..0794541
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
@@ -0,0 +1,18 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+
+<Location / >
+ <LimitExcept {{ allowed_http_methods }} >
+ # http://httpd.apache.org/docs/2.4/upgrading.html
+ {% if apache_version > '2.2' -%}
+ Require all granted
+ {% else -%}
+ Order Allow,Deny
+ Deny from all
+ {% endif %}
+ </LimitExcept>
+</Location>
+
+TraceEnable {{ traceenable }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 0000000..6a7057b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+
class BaseAudit(object):  # NO-QA
    """Base class for hardening checks.

    The lifecycle of a hardening check is to first check to see if the
    system is in compliance for the specified check; if it is not, the
    check enforces compliance in ensure_compliance().
    """
    def __init__(self, *args, **kwargs):
        # 'unless' is an optional guard: a truthy value (or a callable
        # returning a truthy value) suppresses the action.
        self.unless = kwargs.get('unless', None)
        super(BaseAudit, self).__init__()

    def ensure_compliance(self):
        """Check compliance and, where needed, enforce it.

        Subclasses override this; implementations should raise when a
        performed check cannot be brought into compliance.
        """
        pass

    def _take_action(self):
        """Return True when the audit's action should be performed.

        The 'unless' guard is consulted: if it is a callable it is
        invoked with no arguments; otherwise its truthiness is used
        directly. A truthy guard suppresses the action.
        """
        if self.unless is None:
            return True
        if callable(self.unless):
            return not self.unless()
        return not self.unless
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
new file mode 100644
index 0000000..cf3c987
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import subprocess
+
+from six import string_types
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+ ERROR,
+)
+
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
class DisabledModuleAudit(BaseAudit):
    """Audits Apache2 modules.

    Determines if the apache2 modules are enabled. If the modules are enabled
    then they are removed in the ensure_compliance.
    """
    def __init__(self, modules):
        """
        :param modules: a module name, an iterable of names, or None.
        """
        # Bug fix: initialise BaseAudit (sets self.unless), which the
        # original neglected to do.
        super(DisabledModuleAudit, self).__init__()
        if modules is None:
            self.modules = []
        elif isinstance(modules, string_types):
            self.modules = [modules]
        else:
            self.modules = modules

    def ensure_compliance(self):
        """Ensures that the modules are not loaded."""
        if not self.modules:
            return

        try:
            loaded_modules = self._get_loaded_modules()
            non_compliant_modules = []
            for module in self.modules:
                if module in loaded_modules:
                    log("Module '%s' is enabled but should not be." %
                        (module), level=INFO)
                    non_compliant_modules.append(module)

            if len(non_compliant_modules) == 0:
                return

            for module in non_compliant_modules:
                self._disable_module(module)
            self._restart_apache()
        except subprocess.CalledProcessError as e:
            log('Error occurred auditing apache module compliance. '
                'This may have been already reported. '
                'Output is: %s' % e.output, level=ERROR)

    @staticmethod
    def _get_loaded_modules():
        """Returns the modules which are enabled in Apache."""
        output = subprocess.check_output(['apache2ctl', '-M'])
        # Python 3 returns bytes; decode so the str regex below applies.
        if not isinstance(output, str):
            output = output.decode('utf-8')
        modules = []
        # Bug fix: iterate per line. The previous .strip().split()
        # tokenised on all whitespace and discarded the leading space the
        # regex anchors on, so no module name was ever matched.
        for line in output.splitlines():
            # Each line of the enabled module output looks like:
            #  module_name (static|shared)
            # Plus a header line at the top of the output which is stripped
            # out by the regex.
            matcher = re.search(r'^ (\S*)', line)
            if matcher:
                modules.append(matcher.group(1))
        return modules

    @staticmethod
    def _disable_module(module):
        """Disables the specified module in Apache."""
        try:
            subprocess.check_call(['a2dismod', module])
        except subprocess.CalledProcessError as e:
            # Note: catch error here to allow the attempt of disabling
            # multiple modules in one go rather than failing after the
            # first module fails.
            log('Error occurred disabling module %s. '
                'Output is: %s' % (module, e.output), level=ERROR)

    @staticmethod
    def _restart_apache():
        """Restarts the apache process"""
        subprocess.check_output(['service', 'apache2', 'restart'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
new file mode 100644
index 0000000..e94af03
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
@@ -0,0 +1,105 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import # required for external apt import
+from apt import apt_pkg
+from six import string_types
+
+from charmhelpers.fetch import (
+ apt_cache,
+ apt_purge
+)
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
class AptConfig(BaseAudit):
    """Audits apt configuration keys against expected values."""

    def __init__(self, config, **kwargs):
        """
        :param config: iterable of dicts, each with 'key', 'expected' and
                       an optional 'default' entry.
        """
        # Bug fix: forward kwargs (e.g. 'unless') to BaseAudit, which the
        # original dropped, leaving self.unless unset.
        super(AptConfig, self).__init__(**kwargs)
        self.config = config

    def verify_config(self):
        """Log a WARNING for each apt setting whose live value differs
        from the expected one.
        """
        apt_pkg.init()
        for cfg in self.config:
            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
            if value and value != cfg['expected']:
                log("APT config '%s' has unexpected value '%s' "
                    "(expected='%s')" %
                    (cfg['key'], value, cfg['expected']), level=WARNING)

    def ensure_compliance(self):
        """Audit-only check; reports via logs, applies no changes."""
        self.verify_config()
+
+
class RestrictedPackages(BaseAudit):
    """Class used to audit restricted packages on the system."""

    def __init__(self, pkgs, **kwargs):
        # Normalise to a list: accept a single package name (or any
        # non-iterable value) as well as an iterable of names.
        super(RestrictedPackages, self).__init__(**kwargs)
        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
            self.pkgs = [pkgs]
        else:
            self.pkgs = pkgs

    def ensure_compliance(self):
        """Purge any restricted package found installed on the system."""
        cache = apt_cache()

        for p in self.pkgs:
            # Unknown to apt entirely - nothing to do.
            if p not in cache:
                continue

            pkg = cache[p]
            if not self.is_virtual_package(pkg):
                if not pkg.current_ver:
                    log("Package '%s' is not installed." % pkg.name,
                        level=DEBUG)
                    continue
                else:
                    log("Restricted package '%s' is installed" % pkg.name,
                        level=WARNING)
                    self.delete_package(cache, pkg)
            else:
                # Virtual package: its providers are handled recursively
                # by delete_package.
                log("Checking restricted virtual package '%s' provides" %
                    pkg.name, level=DEBUG)
                self.delete_package(cache, pkg)

    def delete_package(self, cache, pkg):
        """Deletes the package from the system.

        Deletes the package form the system, properly handling virtual
        packages.

        :param cache: the apt cache
        :param pkg: the package to remove
        """
        if self.is_virtual_package(pkg):
            log("Package '%s' appears to be virtual - purging provides" %
                pkg.name, level=DEBUG)
            # NOTE(review): assumes apt_pkg provides_list entries are
            # tuples whose third element exposes parent_pkg (the
            # providing package) — confirm against python-apt docs.
            for _p in pkg.provides_list:
                self.delete_package(cache, _p[2].parent_pkg)
        elif not pkg.current_ver:
            log("Package '%s' not installed" % pkg.name, level=DEBUG)
            return
        else:
            log("Purging package '%s'" % pkg.name, level=DEBUG)
            apt_purge(pkg.name)

    def is_virtual_package(self, pkg):
        # Virtual packages provide other packages but have no versions
        # of their own.
        return pkg.has_provides and not pkg.has_versions
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 0000000..0fb545a
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,552 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+ CalledProcessError,
+ check_output,
+ check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+ S_ISGID,
+ S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+ get_template_path,
+ render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
class BaseFileAudit(BaseAudit):
    """Base class for file audits.

    Drives the check/fix flow over a set of file paths; concrete subclasses
    supply the compliance check (is_compliant) and the corrective action
    (comply).
    """

    def __init__(self, paths, always_comply=False, *args, **kwargs):
        """
        :param paths: string path or list of paths of files we want to apply
                      compliance checks or criteria to.
        :param always_comply: if true compliance criteria is always applied
                              else compliance is skipped for non-existent
                              paths.
        """
        super(BaseFileAudit, self).__init__(*args, **kwargs)
        self.always_comply = always_comply
        # Normalise a single path (or any non-iterable) into a list.
        if not isinstance(paths, string_types) and hasattr(paths, '__iter__'):
            self.paths = paths
        else:
            self.paths = [paths]

    def ensure_compliance(self):
        """Ensure that all registered files comply to registered criteria."""
        for target in self.paths:
            exists = os.path.exists(target)
            if exists and self.is_compliant(target):
                continue

            if exists:
                log('File %s is not in compliance.' % target, level=INFO)
            elif not self.always_comply:
                log("Non-existent path '%s' - skipping compliance check"
                    % (target), level=INFO)
                continue

            if self._take_action():
                log("Applying compliance criteria to '%s'" % (target),
                    level=INFO)
                self.comply(target)

    def is_compliant(self, path):
        """Audits the path to see if it is compliant.

        :param path: the path to the file that should be checked.
        """
        raise NotImplementedError

    def comply(self, path):
        """Enforces the compliance of a path.

        :param path: the path to the file that should be enforced.
        """
        raise NotImplementedError

    @classmethod
    def _get_stat(cls, path):
        """Returns the POSIX stat information for the specified file path.

        :param path: the path to get the stat information for.
        :returns: an os.stat_result for the path; raises OSError if the
                  path does not exist.
        """
        return os.stat(path)
+
+
class FilePermissionAudit(BaseFileAudit):
    """Implements an audit for file permissions and ownership for a user.

    This class implements functionality that ensures that a specific user/group
    will own the file(s) specified and that the permissions specified are
    applied properly to the file.
    """
    def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
        """
        :param paths: path or list of paths to audit.
        :param user: name of the user that must own the path(s).
        :param group: name of the owning group; defaults to the user's
                      primary group when not supplied.
        :param mode: expected permission bits (including suid/sgid/sticky).
        """
        self.user = user
        self.group = group
        self.mode = mode
        # BUGFIX: user/group/mode used to be forwarded positionally, so
        # 'user' landed in BaseFileAudit's 'always_comply' parameter as a
        # truthy pwd struct, causing compliance to be attempted on
        # non-existent paths. Forward only 'paths' plus keyword args.
        super(FilePermissionAudit, self).__init__(paths, **kwargs)

    @property
    def user(self):
        return self._user

    @user.setter
    def user(self, name):
        # Resolve the user name to a pwd struct once; None if unknown.
        try:
            user = pwd.getpwnam(name)
        except KeyError:
            log('Unknown user %s' % name, level=ERROR)
            user = None
        self._user = user

    @property
    def group(self):
        return self._group

    @group.setter
    def group(self, name):
        try:
            group = None
            if name:
                group = grp.getgrnam(name)
            else:
                # NOTE(review): assumes the user lookup above succeeded;
                # falls back to the user's primary group.
                group = grp.getgrgid(self.user.pw_gid)
        except KeyError:
            log('Unknown group %s' % name, level=ERROR)
        self._group = group

    def is_compliant(self, path):
        """Checks if the path is in compliance.

        Used to determine if the path specified meets the necessary
        requirements to be in compliance with the check itself.

        :param path: the file path to check
        :returns: True if the path is compliant, False otherwise.
        """
        stat = self._get_stat(path)
        user = self.user
        group = self.group

        compliant = True
        if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
            log('File %s is not owned by %s:%s.' % (path, user.pw_name,
                                                    group.gr_name),
                level=INFO)
            compliant = False

        # POSIX refers to the st_mode bits as corresponding to both the
        # file type and file permission bits, where the least significant 12
        # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
        # file permission bits (8-0)
        perms = stat.st_mode & 0o7777
        if perms != self.mode:
            log('File %s has incorrect permissions, currently set to %s' %
                (path, oct(perms)), level=INFO)
            compliant = False

        return compliant

    def comply(self, path):
        """Issues a chown and chmod to the file paths specified."""
        utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
                                 self.mode)
+
+
class DirectoryPermissionAudit(FilePermissionAudit):
    """Performs a permission check for the specified directory path."""

    def __init__(self, paths, user, group=None, mode=0o600,
                 recursive=True, **kwargs):
        """
        :param recursive: when True, audit the whole directory tree under
                          each path; otherwise only the path itself.
        """
        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
                                                       mode, **kwargs)
        self.recursive = recursive

    def is_compliant(self, path):
        """Checks if the directory is compliant.

        Used to determine if the path specified and all of its children
        directories are in compliance with the check itself.

        :param path: the directory path to check
        :returns: True if the directory tree is compliant, otherwise False.
        :raises ValueError: if path is not a directory.
        """
        if not os.path.isdir(path):
            log('Path specified %s is not a directory.' % path, level=ERROR)
            raise ValueError("%s is not a directory." % path)

        if not self.recursive:
            return super(DirectoryPermissionAudit, self).is_compliant(path)

        # NOTE: only leaf directories (those without subdirectories) are
        # checked here.
        compliant = True
        for root, dirs, _ in os.walk(path):
            if len(dirs) > 0:
                continue

            if not super(DirectoryPermissionAudit, self).is_compliant(root):
                compliant = False

        return compliant

    def comply(self, path):
        """Enforce ownership/permissions on the directory tree.

        BUGFIX: previously only directories *with* subdirectories were
        complied while is_compliant() flagged only leaf directories, so a
        detected infraction was never actually fixed. Apply the criteria to
        every directory in the walk instead.
        """
        for root, _, _ in os.walk(path):
            super(DirectoryPermissionAudit, self).comply(root)
+
+
class ReadOnly(BaseFileAudit):
    """Audits that files and folders are read only."""
    def __init__(self, paths, *args, **kwargs):
        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True if no file under path is group/other writable.

        :param path: directory (or file) to scan with find(1).
        """
        try:
            output = check_output(['find', path, '-perm', '-go+w',
                                   '-type', 'f']).strip()

            # The find above will find any files which have permission sets
            # which allow too broad of write access. As such, the path is
            # compliant if there is no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            # BUGFIX: traceback.format_exc() takes no exception argument;
            # passing one raises a TypeError on Python 3 while formatting
            # this message.
            log('Error occurred checking finding writable files for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
            return False

    def comply(self, path):
        """Strip group/other write bits recursively from path."""
        try:
            check_output(['chmod', 'go-w', '-R', path])
        except CalledProcessError as e:
            # BUGFIX: format_exc() takes no argument (see is_compliant).
            log('Error occurred removing writeable permissions for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
+
+
class NoReadWriteForOther(BaseFileAudit):
    """Ensures that the files found under the base path are not readable or
    writable by anyone other than the owner or the group.
    """
    def __init__(self, paths, **kwargs):
        # Forward **kwargs for consistency with the other audits in this
        # module (previously they were silently unsupported).
        super(NoReadWriteForOther, self).__init__(paths, **kwargs)

    def is_compliant(self, path):
        """Return True if no file under path is readable/writable by other.

        :param path: directory (or file) to scan with find(1).
        """
        try:
            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
                   '-perm', '-o+w', '-type', 'f']
            output = check_output(cmd).strip()

            # The find above here will find any files which have read or
            # write permissions for other, meaning there is too broad of access
            # to read/write the file. As such, the path is compliant if there's
            # no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            log('Error occurred while finding files which are readable or '
                'writable to the world in %s. '
                'Command output is: %s.' % (path, e.output), level=ERROR)
            # BUGFIX: previously fell through returning None; make the
            # non-compliant result explicit.
            return False

    def comply(self, path):
        """Strip read/write bits for 'other' recursively from path."""
        try:
            check_output(['chmod', '-R', 'o-rw', path])
        except CalledProcessError as e:
            # BUGFIX: supply the log level (was missing, inconsistent with
            # the rest of this module).
            log('Error occurred attempting to change modes of files under '
                'path %s. Output of command is: %s' % (path, e.output),
                level=ERROR)
+
+
class NoSUIDSGIDAudit(BaseFileAudit):
    """Audits that specified files do not have SUID/SGID bits set."""
    def __init__(self, paths, *args, **kwargs):
        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True when neither the suid nor sgid bit is set on path."""
        stat = self._get_stat(path)
        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
            return False

        return True

    def comply(self, path):
        """Clear the suid/sgid bits on path via chmod -s."""
        try:
            log('Removing suid/sgid from %s.' % path, level=DEBUG)
            check_output(['chmod', '-s', path])
        except CalledProcessError as e:
            # BUGFIX: add the missing space between the two concatenated
            # message fragments and drop the bogus argument to format_exc()
            # (it takes none; passing one breaks on Python 3).
            log('Error occurred removing suid/sgid from %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc()), level=ERROR)
+
+
class TemplatedFile(BaseFileAudit):
    """The TemplatedFileAudit audits the contents of a templated file.

    This audit renders a file from a template, sets the appropriate file
    permissions, then generates a hashsum with which to check the content
    changed.
    """
    def __init__(self, path, context, template_dir, mode, user='root',
                 group='root', service_actions=None, **kwargs):
        # NOTE: 'context' is stored and later *called* in comply(), so it is
        # expected to be a callable returning the template context.
        self.context = context
        self.user = user
        self.group = group
        self.mode = mode
        self.template_dir = template_dir
        # Optional list of dicts with 'service' and 'actions' keys, executed
        # as 'service <name> <action>' after the file is rewritten.
        self.service_actions = service_actions
        # always_comply=True so the file is rendered even when it does not
        # exist yet.
        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
                                            **kwargs)

    def is_compliant(self, path):
        """Determines if the templated file is compliant.

        A templated file is only compliant if it has not changed (as
        determined by its sha256 hashsum) AND its file permissions are set
        appropriately.

        :param path: the path to check compliance.
        """
        # NOTE: all three checks run unconditionally (and may update stored
        # template checksums as a side effect) before the result is combined.
        same_templates = self.templates_match(path)
        same_content = self.contents_match(path)
        same_permissions = self.permissions_match(path)

        if same_content and same_permissions and same_templates:
            return True

        return False

    def run_service_actions(self):
        """Run any actions on services requested."""
        if not self.service_actions:
            return

        for svc_action in self.service_actions:
            name = svc_action['service']
            actions = svc_action['actions']
            log("Running service '%s' actions '%s'" % (name, actions),
                level=DEBUG)
            for action in actions:
                cmd = ['service', name, action]
                try:
                    check_call(cmd)
                except CalledProcessError as exc:
                    # A failed action is logged but does not abort the
                    # remaining actions.
                    log("Service name='%s' action='%s' failed - %s" %
                        (name, action, exc), level=WARNING)

    def comply(self, path):
        """Ensures the contents and the permissions of the file.

        :param path: the path to correct
        """
        # Create any missing parent directories before rendering.
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        self.pre_write()
        # Render the template, fix ownership/permissions, run any service
        # actions, then record the new content checksum.
        render_and_write(self.template_dir, path, self.context())
        utils.ensure_permissions(path, self.user, self.group, self.mode)
        self.run_service_actions()
        self.save_checksum(path)
        self.post_write()

    def pre_write(self):
        """Invoked prior to writing the template. Hook for subclasses."""
        pass

    def post_write(self):
        """Invoked after writing the template. Hook for subclasses."""
        pass

    def templates_match(self, path):
        """Determines if the template files are the same.

        The template file equality is determined by the hashsum of the
        template files themselves. If there is no hashsum, then the content
        cannot be sure to be the same so treat it as if they changed.
        Otherwise, return whether or not the hashsums are the same.

        :param path: the path to check
        :returns: boolean
        """
        template_path = get_template_path(self.template_dir, path)
        key = 'hardening:template:%s' % template_path
        template_checksum = file_hash(template_path)
        kv = unitdata.kv()
        stored_tmplt_checksum = kv.get(key)
        if not stored_tmplt_checksum:
            # First sighting of this template: record its checksum.
            kv.set(key, template_checksum)
            kv.flush()
            log('Saved template checksum for %s.' % template_path,
                level=DEBUG)
            # Since we don't have a template checksum, then assume it doesn't
            # match and return that the template is different.
            return False
        elif stored_tmplt_checksum != template_checksum:
            # Template changed on disk: update the stored checksum and
            # report a mismatch so the target file is re-rendered.
            kv.set(key, template_checksum)
            kv.flush()
            log('Updated template checksum for %s.' % template_path,
                level=DEBUG)
            return False

        # Here the template hasn't changed based upon the calculated
        # checksum of the template and what was previously stored.
        return True

    def contents_match(self, path):
        """Determines if the file content is the same.

        This is determined by comparing hashsum of the file contents and
        the saved hashsum. If there is no hashsum, then the content cannot
        be sure to be the same so treat them as if they are not the same.
        Otherwise, return True if the hashsums are the same, False if they
        are not the same.

        :param path: the file to check.
        """
        checksum = file_hash(path)

        kv = unitdata.kv()
        stored_checksum = kv.get('hardening:%s' % path)
        if not stored_checksum:
            # If the checksum hasn't been generated, return False to ensure
            # the file is written and the checksum stored.
            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
            return False
        elif stored_checksum != checksum:
            log('Checksum mismatch for %s.' % path, level=DEBUG)
            return False

        return True

    def permissions_match(self, path):
        """Determines if the file owner and permissions match.

        :param path: the path to check.
        """
        # Delegate to a throwaway FilePermissionAudit configured with this
        # audit's expected user/group/mode.
        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
        return audit.is_compliant(path)

    def save_checksum(self, path):
        """Calculates and saves the checksum for the path specified.

        :param path: the path of the file to save the checksum.
        """
        checksum = file_hash(path)
        kv = unitdata.kv()
        kv.set('hardening:%s' % path, checksum)
        kv.flush()
+
+
class DeletedFile(BaseFileAudit):
    """Audit to ensure that a file is deleted."""
    def __init__(self, paths, **kwargs):
        """
        :param paths: path or list of paths that must not exist.
        """
        # Forward **kwargs for consistency with the other audits in this
        # module (previously they were silently unsupported).
        super(DeletedFile, self).__init__(paths, **kwargs)

    def is_compliant(self, path):
        # Compliant when the path is absent.
        return not os.path.exists(path)

    def comply(self, path):
        os.remove(path)
+
+
class FileContentAudit(BaseFileAudit):
    """Audit the contents of a file against regex expectations."""
    def __init__(self, paths, cases, **kwargs):
        """
        :param paths: path or list of paths whose contents are audited.
        :param cases: dict with optional 'pass' and 'fail' keys, each a
                      list of regex patterns expected (not) to match.
        """
        # Cases we expect to pass
        self.pass_cases = cases.get('pass', [])
        # Cases we expect to fail
        self.fail_cases = cases.get('fail', [])
        super(FileContentAudit, self).__init__(paths, **kwargs)

    def is_compliant(self, path):
        """
        Given a set of content matching cases i.e. tuple(regex, bool) where
        bool value denotes whether or not regex is expected to match, check
        that all cases match as expected with the contents of the file. Cases
        can be expected to pass or fail.

        :param path: Path of file to check.
        :returns: Boolean value representing whether or not all cases are
                  found to be compliant.
        """
        log("Auditing contents of file '%s'" % (path), level=DEBUG)
        with open(path, 'r') as fd:
            contents = fd.read()

        expected = len(self.pass_cases) + len(self.fail_cases)
        satisfied = 0

        for pattern in self.pass_cases:
            regex = re.compile(pattern, flags=re.MULTILINE)
            if re.search(regex, contents) is not None:
                satisfied += 1
            else:
                log("Pattern '%s' was expected to pass but instead it failed"
                    % (pattern), level=WARNING)

        for pattern in self.fail_cases:
            regex = re.compile(pattern, flags=re.MULTILINE)
            if re.search(regex, contents) is None:
                satisfied += 1
            else:
                log("Pattern '%s' was expected to fail but instead it passed"
                    % (pattern), level=WARNING)

        log("Checked %s cases and %s passed" % (expected, satisfied),
            level=DEBUG)
        return satisfied == expected

    def comply(self, *args, **kwargs):
        """NOOP since we just issue warnings. This is to avoid the
        NotImplementedError.
        """
        log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
new file mode 100644
index 0000000..e5ada29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
@@ -0,0 +1,13 @@
+# NOTE: this file contains the default configuration for the 'apache' hardening
+# code. If you want to override any settings you must add them to a file
+# called hardening.yaml in the root directory of your charm using the
+# name 'apache' as the root key followed by any of the following with new
+# values.
+
+common:
+ apache_dir: '/etc/apache2'
+
+hardening:
+ traceenable: 'off'
+ allowed_http_methods: "GET POST"
+ modules_to_disable: [ cgi, cgid ] \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
new file mode 100644
index 0000000..227589b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
@@ -0,0 +1,9 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+# file. It is used to validate user-provided overrides.
+common:
+  apache_dir:
+
+hardening:
+  traceenable:
+  allowed_http_methods:
+  modules_to_disable:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 0000000..682d22b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+# code. If you want to override any settings you must add them to a file
+# called hardening.yaml in the root directory of your charm using the
+# name 'mysql' as the root key followed by any of the following with new
+# values.
+
+hardening:
+ mysql-conf: /etc/mysql/my.cnf
+ hardening-conf: /etc/mysql/conf.d/hardening.cnf
+
+security:
+ # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
+ chroot: None
+
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
+ safe-user-create: 1
+
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
+ secure-auth: 1
+
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
+ skip-symbolic-links: 1
+
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
+ skip-show-database: True
+
+ # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
+ local-infile: 0
+
+ # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
+ allow-suspicious-udfs: 0
+
+ # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
+ automatic-sp-privileges: 0
+
+ # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
+ secure-file-priv: /tmp
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
new file mode 100644
index 0000000..2edf325
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
@@ -0,0 +1,15 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+# file. It is used to validate user-provided overrides.
+hardening:
+ mysql-conf:
+ hardening-conf:
+security:
+ chroot:
+ safe-user-create:
+ secure-auth:
+ skip-symbolic-links:
+ skip-show-database:
+ local-infile:
+ allow-suspicious-udfs:
+ automatic-sp-privileges:
+ secure-file-priv:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
new file mode 100644
index 0000000..ddd4286
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
@@ -0,0 +1,67 @@
+# NOTE: this file contains the default configuration for the 'os' hardening
+# code. If you want to override any settings you must add them to a file
+# called hardening.yaml in the root directory of your charm using the
+# name 'os' as the root key followed by any of the following with new
+# values.
+
+general:
+ desktop_enable: False # (type:boolean)
+
+environment:
+ extra_user_paths: []
+ umask: 027
+ root_path: /
+
+auth:
+ pw_max_age: 60
+ # discourage password cycling
+ pw_min_age: 7
+ retries: 5
+ lockout_time: 600
+ timeout: 60
+ allow_homeless: False # (type:boolean)
+ pam_passwdqc_enable: True # (type:boolean)
+ pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
+ root_ttys:
+ console
+ tty1
+ tty2
+ tty3
+ tty4
+ tty5
+ tty6
+ uid_min: 1000
+ gid_min: 1000
+ sys_uid_min: 100
+ sys_uid_max: 999
+ sys_gid_min: 100
+ sys_gid_max: 999
+ chfn_restrict:
+
+security:
+ users_allow: []
+ suid_sgid_enforce: True # (type:boolean)
+ # user-defined blacklist and whitelist
+ suid_sgid_blacklist: []
+ suid_sgid_whitelist: []
+ # if this is True, remove any suid/sgid bits from files that were not in the whitelist
+ suid_sgid_dry_run_on_unknown: False # (type:boolean)
+ suid_sgid_remove_from_unknown: False # (type:boolean)
+ # remove packages with known issues
+ packages_clean: True # (type:boolean)
+ packages_list:
+ xinetd
+ inetd
+ ypserv
+ telnet-server
+ rsh-server
+ rsync
+ kernel_enable_module_loading: True # (type:boolean)
+ kernel_enable_core_dump: False # (type:boolean)
+
+sysctl:
+ kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
+ kernel_enable_sysrq: False # (type:boolean)
+ forwarding: False # (type:boolean)
+ ipv6_enable: False # (type:boolean)
+ arp_restricted: True # (type:boolean)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
new file mode 100644
index 0000000..88b3966
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+# file. It is used to validate user-provided overrides.
+general:
+ desktop_enable:
+environment:
+ extra_user_paths:
+ umask:
+ root_path:
+auth:
+ pw_max_age:
+ pw_min_age:
+ retries:
+ lockout_time:
+ timeout:
+ allow_homeless:
+ pam_passwdqc_enable:
+ pam_passwdqc_options:
+ root_ttys:
+ uid_min:
+ gid_min:
+ sys_uid_min:
+ sys_uid_max:
+ sys_gid_min:
+ sys_gid_max:
+ chfn_restrict:
+security:
+ users_allow:
+ suid_sgid_enforce:
+ suid_sgid_blacklist:
+ suid_sgid_whitelist:
+ suid_sgid_dry_run_on_unknown:
+ suid_sgid_remove_from_unknown:
+ packages_clean:
+ packages_list:
+ kernel_enable_module_loading:
+ kernel_enable_core_dump:
+sysctl:
+ kernel_secure_sysrq:
+ kernel_enable_sysrq:
+ forwarding:
+ ipv6_enable:
+ arp_restricted:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
new file mode 100644
index 0000000..cd529bc
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
@@ -0,0 +1,49 @@
+# NOTE: this file contains the default configuration for the 'ssh' hardening
+# code. If you want to override any settings you must add them to a file
+# called hardening.yaml in the root directory of your charm using the
+# name 'ssh' as the root key followed by any of the following with new
+# values.
+
+common:
+ service_name: 'ssh'
+ network_ipv6_enable: False # (type:boolean)
+ ports: [22]
+ remote_hosts: []
+
+client:
+ package: 'openssh-client'
+ cbc_required: False # (type:boolean)
+ weak_hmac: False # (type:boolean)
+ weak_kex: False # (type:boolean)
+ roaming: False
+ password_authentication: 'no'
+
+server:
+ host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
+ '/etc/ssh/ssh_host_ecdsa_key']
+ cbc_required: False # (type:boolean)
+ weak_hmac: False # (type:boolean)
+ weak_kex: False # (type:boolean)
+ allow_root_with_key: False # (type:boolean)
+ allow_tcp_forwarding: 'no'
+ allow_agent_forwarding: 'no'
+ allow_x11_forwarding: 'no'
+ use_privilege_separation: 'sandbox'
+ listen_to: ['0.0.0.0']
+ use_pam: 'no'
+ package: 'openssh-server'
+ password_authentication: 'no'
+ alive_interval: '600'
+ alive_count: '3'
+ sftp_enable: False # (type:boolean)
+ sftp_group: 'sftponly'
+ sftp_chroot: '/home/%u'
+ deny_users: []
+ allow_users: []
+ deny_groups: []
+ allow_groups: []
+ print_motd: 'no'
+ print_last_log: 'no'
+ use_dns: 'no'
+ max_auth_tries: 2
+ max_sessions: 10
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
new file mode 100644
index 0000000..d05e054
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+# file. It is used to validate user-provided overrides.
+common:
+ service_name:
+ network_ipv6_enable:
+ ports:
+ remote_hosts:
+client:
+ package:
+ cbc_required:
+ weak_hmac:
+ weak_kex:
+ roaming:
+ password_authentication:
+server:
+ host_key_files:
+ cbc_required:
+ weak_hmac:
+ weak_kex:
+ allow_root_with_key:
+ allow_tcp_forwarding:
+ allow_agent_forwarding:
+ allow_x11_forwarding:
+ use_privilege_separation:
+ listen_to:
+ use_pam:
+ package:
+ password_authentication:
+ alive_interval:
+ alive_count:
+ sftp_enable:
+ sftp_group:
+ sftp_chroot:
+ deny_users:
+ allow_users:
+ deny_groups:
+ allow_groups:
+ print_motd:
+ print_last_log:
+ use_dns:
+ max_auth_tries:
+ max_sessions:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
new file mode 100644
index 0000000..ac7568d
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
@@ -0,0 +1,84 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+
+from collections import OrderedDict
+
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ DEBUG,
+ WARNING,
+)
+from charmhelpers.contrib.hardening.host.checks import run_os_checks
+from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
+from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
+from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
+
+
def harden(overrides=None):
    """Hardening decorator.

    This is the main entry point for running the hardening stack. In order to
    run modules of the stack you must add this decorator to charm hook(s) and
    ensure that your charm config.yaml contains the 'harden' option set to
    one or more of the supported modules. Setting these will cause the
    corresponding hardening code to be run when the hook fires.

    This decorator can and should be applied to more than one hook or function
    such that hardening modules are called multiple times. This is because
    subsequent calls will perform auditing checks that will report any changes
    to resources hardened by the first run (and possibly perform compliance
    actions as a result of any detected infractions).

    :param overrides: Optional list of stack modules used to override those
                      provided with 'harden' config.
    :returns: Returns value returned by decorated function once executed.
    """
    def _harden_inner1(f):
        log("Hardening function '%s'" % (f.__name__), level=DEBUG)

        def _harden_inner2(*args, **kwargs):
            # Modules will always be performed in this order.
            RUN_CATALOG = OrderedDict([('os', run_os_checks),
                                       ('ssh', run_ssh_checks),
                                       ('mysql', run_mysql_checks),
                                       ('apache', run_apache_checks)])

            # BUGFIX: work on a copy of 'overrides' - the loop below removes
            # recognised modules from 'enabled', and mutating the caller's
            # list would drop those modules on every subsequent invocation
            # of the decorated hook.
            if overrides:
                enabled = list(overrides)
            else:
                enabled = (config("harden") or "").split()

            if enabled:
                modules_to_run = []
                # modules will always be performed in the following order
                for module, func in six.iteritems(RUN_CATALOG):
                    if module in enabled:
                        enabled.remove(module)
                        modules_to_run.append(func)

                if enabled:
                    # Anything left over was not recognised.
                    log("Unknown hardening modules '%s' - ignoring" %
                        (', '.join(enabled)), level=WARNING)

                for hardener in modules_to_run:
                    log("Executing hardening module '%s'" %
                        (hardener.__name__), level=DEBUG)
                    hardener()
            else:
                log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)

            return f(*args, **kwargs)
        return _harden_inner2

    return _harden_inner1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
new file mode 100644
index 0000000..277b8c7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
from os import path

# Absolute path to this package's bundled 'templates' directory, resolved
# relative to this __init__.py.
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
new file mode 100644
index 0000000..c3bd598
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
@@ -0,0 +1,50 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+)
+from charmhelpers.contrib.hardening.host.checks import (
+ apt,
+ limits,
+ login,
+ minimize_access,
+ pam,
+ profile,
+ securetty,
+ suid_sgid,
+ sysctl
+)
+
+
+def run_os_checks():
+ log("Starting OS hardening checks.", level=DEBUG)
+ checks = apt.get_audits()
+ checks.extend(limits.get_audits())
+ checks.extend(login.get_audits())
+ checks.extend(minimize_access.get_audits())
+ checks.extend(pam.get_audits())
+ checks.extend(profile.get_audits())
+ checks.extend(securetty.get_audits())
+ checks.extend(suid_sgid.get_audits())
+ checks.extend(sysctl.get_audits())
+
+ for check in checks:
+ log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+ check.ensure_compliance()
+
+ log("OS hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
new file mode 100644
index 0000000..2c221cd
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.utils import get_settings
+from charmhelpers.contrib.hardening.audits.apt import (
+ AptConfig,
+ RestrictedPackages,
+)
+
+
+def get_audits():
+ """Get OS hardening apt audits.
+
+    :returns: list of audits
+ """
+ audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
+ 'expected': 'false'}])]
+
+ settings = get_settings('os')
+ clean_packages = settings['security']['packages_clean']
+ if clean_packages:
+ security_packages = settings['security']['packages_list']
+ if security_packages:
+ audits.append(RestrictedPackages(security_packages))
+
+ return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
new file mode 100644
index 0000000..8ce9dc2
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
@@ -0,0 +1,55 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import (
+ DirectoryPermissionAudit,
+ TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get OS hardening security limits audits.
+
+    :returns: list of audits
+ """
+ audits = []
+ settings = utils.get_settings('os')
+
+ # Ensure that the /etc/security/limits.d directory is only writable
+ # by the root user, but others can execute and read.
+ audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
+ user='root', group='root',
+ mode=0o755))
+
+ # If core dumps are not enabled, then don't allow core dumps to be
+ # created as they may contain sensitive information.
+ if not settings['security']['kernel_enable_core_dump']:
+ audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
+ SecurityLimitsContext(),
+ template_dir=TEMPLATES_DIR,
+ user='root', group='root', mode=0o0440))
+ return audits
+
+
+class SecurityLimitsContext(object):
+
+ def __call__(self):
+ settings = utils.get_settings('os')
+ ctxt = {'disable_core_dump':
+ not settings['security']['kernel_enable_core_dump']}
+ return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
new file mode 100644
index 0000000..d32c4f6
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
@@ -0,0 +1,67 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from six import string_types
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get OS hardening login.defs audits.
+
+    :returns: list of audits
+ """
+ audits = [TemplatedFile('/etc/login.defs', LoginContext(),
+ template_dir=TEMPLATES_DIR,
+ user='root', group='root', mode=0o0444)]
+ return audits
+
+
+class LoginContext(object):
+
+ def __call__(self):
+ settings = utils.get_settings('os')
+
+ # Octal numbers in yaml end up being turned into decimal,
+ # so check if the umask is entered as a string (e.g. '027')
+        # or as an octal umask as we know it (e.g. 002). If it's not
+ # a string assume it to be octal and turn it into an octal
+ # string.
+ umask = settings['environment']['umask']
+ if not isinstance(umask, string_types):
+ umask = '%s' % oct(umask)
+
+ ctxt = {
+ 'additional_user_paths':
+ settings['environment']['extra_user_paths'],
+ 'umask': umask,
+ 'pwd_max_age': settings['auth']['pw_max_age'],
+ 'pwd_min_age': settings['auth']['pw_min_age'],
+ 'uid_min': settings['auth']['uid_min'],
+ 'sys_uid_min': settings['auth']['sys_uid_min'],
+ 'sys_uid_max': settings['auth']['sys_uid_max'],
+ 'gid_min': settings['auth']['gid_min'],
+ 'sys_gid_min': settings['auth']['sys_gid_min'],
+ 'sys_gid_max': settings['auth']['sys_gid_max'],
+ 'login_retries': settings['auth']['retries'],
+ 'login_timeout': settings['auth']['timeout'],
+ 'chfn_restrict': settings['auth']['chfn_restrict'],
+ 'allow_login_without_home': settings['auth']['allow_homeless']
+ }
+
+ return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
new file mode 100644
index 0000000..c471064
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
@@ -0,0 +1,52 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import (
+ FilePermissionAudit,
+ ReadOnly,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get OS hardening access audits.
+
+    :returns: list of audits
+ """
+ audits = []
+ settings = utils.get_settings('os')
+
+ # Remove write permissions from $PATH folders for all regular users.
+ # This prevents changing system-wide commands from normal users.
+ path_folders = {'/usr/local/sbin',
+ '/usr/local/bin',
+ '/usr/sbin',
+ '/usr/bin',
+ '/bin'}
+ extra_user_paths = settings['environment']['extra_user_paths']
+ path_folders.update(extra_user_paths)
+ audits.append(ReadOnly(path_folders))
+
+ # Only allow the root user to have access to the shadow file.
+ audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
+
+ if 'change_user' not in settings['security']['users_allow']:
+ # su should only be accessible to user and group root, unless it is
+ # expressly defined to allow users to change to root via the
+ # security_users_allow config option.
+ audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
+
+ return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
new file mode 100644
index 0000000..383fe28
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
@@ -0,0 +1,134 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import (
+ check_output,
+ CalledProcessError,
+)
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ ERROR,
+)
+from charmhelpers.fetch import (
+ apt_install,
+ apt_purge,
+ apt_update,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+ TemplatedFile,
+ DeletedFile,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+def get_audits():
+ """Get OS hardening PAM authentication audits.
+
+    :returns: list of audits
+ """
+ audits = []
+
+ settings = utils.get_settings('os')
+
+ if settings['auth']['pam_passwdqc_enable']:
+ audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
+
+ if settings['auth']['retries']:
+ audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
+ else:
+ audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
+
+ return audits
+
+
+class PasswdqcPAMContext(object):
+
+ def __call__(self):
+ ctxt = {}
+ settings = utils.get_settings('os')
+
+ ctxt['auth_pam_passwdqc_options'] = \
+ settings['auth']['pam_passwdqc_options']
+
+ return ctxt
+
+
+class PasswdqcPAM(TemplatedFile):
+ """The PAM Audit verifies the linux PAM settings."""
+ def __init__(self, path):
+ super(PasswdqcPAM, self).__init__(path=path,
+ template_dir=TEMPLATES_DIR,
+ context=PasswdqcPAMContext(),
+ user='root',
+ group='root',
+ mode=0o0640)
+
+    def pre_write(self):
+        # Remove PAM modules that conflict with passwdqc before installing it.
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+ def post_write(self):
+ """Updates the PAM configuration after the file has been written"""
+ try:
+ check_output(['pam-auth-update', '--package'])
+ except CalledProcessError as e:
+ log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+ def __call__(self):
+ ctxt = {}
+ settings = utils.get_settings('os')
+
+ ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+ ctxt['auth_retries'] = settings['auth']['retries']
+
+ return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+ """The PAM Audit verifies the linux PAM settings."""
+ def __init__(self, path):
+ super(Tally2PAM, self).__init__(path=path,
+ template_dir=TEMPLATES_DIR,
+ context=Tally2PAMContext(),
+ user='root',
+ group='root',
+ mode=0o0640)
+
+ def pre_write(self):
+ # Always remove?
+ apt_purge('libpam-ccreds')
+ apt_update(fatal=True)
+ apt_install('libpam-modules')
+
+ def post_write(self):
+ """Updates the PAM configuration after the file has been written"""
+ try:
+ check_output(['pam-auth-update', '--package'])
+ except CalledProcessError as e:
+ log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 0000000..f744335
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,45 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get OS hardening profile audits.
+
+ :returns: dictionary of audits
+ """
+ audits = []
+
+ settings = utils.get_settings('os')
+
+ # If core dumps are not enabled, then don't allow core dumps to be
+ # created as they may contain sensitive information.
+ if not settings['security']['kernel_enable_core_dump']:
+ audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
+ ProfileContext(),
+ template_dir=TEMPLATES_DIR,
+ mode=0o0755, user='root', group='root'))
+ return audits
+
+
+class ProfileContext(object):
+
+ def __call__(self):
+ ctxt = {}
+ return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
new file mode 100644
index 0000000..e33c73c
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get OS hardening Secure TTY audits.
+
+    :returns: list of audits
+ """
+ audits = []
+ audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
+ template_dir=TEMPLATES_DIR,
+ mode=0o0400, user='root', group='root'))
+ return audits
+
+
+class SecureTTYContext(object):
+
+ def __call__(self):
+ settings = utils.get_settings('os')
+ ctxt = {'ttys': settings['auth']['root_ttys']}
+ return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
new file mode 100644
index 0000000..0534689
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
@@ -0,0 +1,131 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+ '/usr/libexec/openssh/ssh-keysign',
+ '/usr/lib/openssh/ssh-keysign',
+ '/sbin/netreport',
+ '/usr/sbin/usernetctl',
+ '/usr/sbin/userisdnctl',
+ '/usr/sbin/pppd',
+ '/usr/bin/lockfile',
+ '/usr/bin/mail-lock',
+ '/usr/bin/mail-unlock',
+ '/usr/bin/mail-touchlock',
+ '/usr/bin/dotlockfile',
+ '/usr/bin/arping',
+ '/usr/sbin/uuidd',
+ '/usr/bin/mtr',
+ '/usr/lib/evolution/camel-lock-helper-1.2',
+ '/usr/lib/pt_chown',
+ '/usr/lib/eject/dmcrypt-get-device',
+ '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+ '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+ '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+ '/usr/bin/passwd', '/usr/bin/ssh-agent',
+ '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+ '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+ '/bin/ping6', '/usr/bin/traceroute6.iputils',
+ '/sbin/mount.nfs', '/sbin/umount.nfs',
+ '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+ '/usr/bin/crontab',
+ '/usr/bin/wall', '/usr/bin/write',
+ '/usr/bin/screen',
+ '/usr/bin/mlocate',
+ '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+ '/bin/fusermount',
+ '/usr/bin/pkexec',
+ '/usr/bin/sudo', '/usr/bin/sudoedit',
+ '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+ '/usr/sbin/suexec',
+ '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+ '/usr/kerberos/bin/ksu',
+ '/usr/sbin/ccreds_validate',
+ '/usr/bin/Xorg',
+ '/usr/bin/X',
+ '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+ '/usr/lib/vte/gnome-pty-helper',
+ '/usr/lib/libvte9/gnome-pty-helper',
+ '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+ """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+ """
+ checks = []
+ settings = utils.get_settings('os')
+ if not settings['security']['suid_sgid_enforce']:
+ log("Skipping suid/sgid hardening", level=INFO)
+ return checks
+
+ # Build the blacklist and whitelist of files for suid/sgid checks.
+ # There are a total of 4 lists:
+ # 1. the system blacklist
+ # 2. the system whitelist
+ # 3. the user blacklist
+ # 4. the user whitelist
+ #
+ # The blacklist is the set of paths which should NOT have the suid/sgid bit
+ # set and the whitelist is the set of paths which MAY have the suid/sgid
+    # bit set. The user whitelist/blacklist effectively override the system
+ # whitelist/blacklist.
+ u_b = settings['security']['suid_sgid_blacklist']
+ u_w = settings['security']['suid_sgid_whitelist']
+
+ blacklist = set(BLACKLIST) - set(u_w + u_b)
+ whitelist = set(WHITELIST) - set(u_b + u_w)
+
+ checks.append(NoSUIDSGIDAudit(blacklist))
+
+ dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+ if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+ # If the policy is a dry_run (e.g. complain only) or remove unknown
+ # suid/sgid bits then find all of the paths which have the suid/sgid
+ # bit set and then remove the whitelisted paths.
+ root_path = settings['environment']['root_path']
+ unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+ checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+ return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+ """Finds all paths/files which have an suid/sgid bit enabled.
+
+ Starting with the root_path, this will recursively find all paths which
+ have an suid or sgid bit set.
+ """
+ cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
+ '-type', 'f', '!', '-path', '/proc/*', '-print']
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, _ = p.communicate()
+ return set(out.split('\n'))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
new file mode 100644
index 0000000..4a76d74
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
@@ -0,0 +1,211 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import platform
+import re
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+ WARNING,
+)
+from charmhelpers.contrib.hardening import utils
+from charmhelpers.contrib.hardening.audits.file import (
+ FilePermissionAudit,
+ TemplatedFile,
+)
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+
+
+SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
+net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
+net.ipv4.conf.all.rp_filter=1
+net.ipv4.conf.default.rp_filter=1
+net.ipv4.icmp_echo_ignore_broadcasts=1
+net.ipv4.icmp_ignore_bogus_error_responses=1
+net.ipv4.icmp_ratelimit=100
+net.ipv4.icmp_ratemask=88089
+net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
+net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
+net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
+net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
+net.ipv4.tcp_rfc1337=1
+net.ipv4.tcp_syncookies=1
+net.ipv4.conf.all.shared_media=1
+net.ipv4.conf.default.shared_media=1
+net.ipv4.conf.all.accept_source_route=0
+net.ipv4.conf.default.accept_source_route=0
+net.ipv4.conf.all.accept_redirects=0
+net.ipv4.conf.default.accept_redirects=0
+net.ipv6.conf.all.accept_redirects=0
+net.ipv6.conf.default.accept_redirects=0
+net.ipv4.conf.all.secure_redirects=0
+net.ipv4.conf.default.secure_redirects=0
+net.ipv4.conf.all.send_redirects=0
+net.ipv4.conf.default.send_redirects=0
+net.ipv4.conf.all.log_martians=0
+net.ipv6.conf.default.router_solicitations=0
+net.ipv6.conf.default.accept_ra_rtr_pref=0
+net.ipv6.conf.default.accept_ra_pinfo=0
+net.ipv6.conf.default.accept_ra_defrtr=0
+net.ipv6.conf.default.autoconf=0
+net.ipv6.conf.default.dad_transmits=0
+net.ipv6.conf.default.max_addresses=1
+net.ipv6.conf.all.accept_ra=0
+net.ipv6.conf.default.accept_ra=0
+kernel.modules_disabled=%(kernel_modules_disabled)s
+kernel.sysrq=%(kernel_sysrq)s
+fs.suid_dumpable=%(fs_suid_dumpable)s
+kernel.randomize_va_space=2
+"""
+
+
+def get_audits():
+ """Get OS hardening sysctl audits.
+
+    :returns: list of audits
+ """
+ audits = []
+ settings = utils.get_settings('os')
+
+ # Apply the sysctl settings which are configured to be applied.
+ audits.append(SysctlConf())
+ # Make sure that only root has access to the sysctl.conf file, and
+ # that it is read-only.
+ audits.append(FilePermissionAudit('/etc/sysctl.conf',
+ user='root',
+ group='root', mode=0o0440))
+ # If module loading is not enabled, then ensure that the modules
+ # file has the appropriate permissions and rebuild the initramfs
+ if not settings['security']['kernel_enable_module_loading']:
+ audits.append(ModulesTemplate())
+
+ return audits
+
+
+class ModulesContext(object):
+
+ def __call__(self):
+ settings = utils.get_settings('os')
+ with open('/proc/cpuinfo', 'r') as fd:
+ cpuinfo = fd.readlines()
+
+ for line in cpuinfo:
+ match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+ if match:
+ vendor = match.group(1)
+
+ if vendor == "GenuineIntel":
+ vendor = "intel"
+ elif vendor == "AuthenticAMD":
+ vendor = "amd"
+
+ ctxt = {'arch': platform.processor(),
+ 'cpuVendor': vendor,
+ 'desktop_enable': settings['general']['desktop_enable']}
+
+ return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+ def __call__(self):
+ settings = utils.get_settings('os')
+ ctxt = {'sysctl': {}}
+
+ log("Applying sysctl settings", level=INFO)
+ extras = {'net_ipv4_ip_forward': 0,
+ 'net_ipv6_conf_all_forwarding': 0,
+ 'net_ipv6_conf_all_disable_ipv6': 1,
+ 'net_ipv4_tcp_timestamps': 0,
+ 'net_ipv4_conf_all_arp_ignore': 0,
+ 'net_ipv4_conf_all_arp_announce': 0,
+ 'kernel_sysrq': 0,
+ 'fs_suid_dumpable': 0,
+ 'kernel_modules_disabled': 1}
+
+ if settings['sysctl']['ipv6_enable']:
+ extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+ if settings['sysctl']['forwarding']:
+ extras['net_ipv4_ip_forward'] = 1
+ extras['net_ipv6_conf_all_forwarding'] = 1
+
+ if settings['sysctl']['arp_restricted']:
+ extras['net_ipv4_conf_all_arp_ignore'] = 1
+ extras['net_ipv4_conf_all_arp_announce'] = 2
+
+ if settings['security']['kernel_enable_module_loading']:
+ extras['kernel_modules_disabled'] = 0
+
+ if settings['sysctl']['kernel_enable_sysrq']:
+ sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+ extras['kernel_sysrq'] = sysrq_val
+
+ if settings['security']['kernel_enable_core_dump']:
+ extras['fs_suid_dumpable'] = 1
+
+ settings.update(extras)
+ for d in (SYSCTL_DEFAULTS % settings).split():
+ d = d.strip().partition('=')
+ key = d[0].strip()
+ path = os.path.join('/proc/sys', key.replace('.', '/'))
+ if not os.path.exists(path):
+ log("Skipping '%s' since '%s' does not exist" % (key, path),
+ level=WARNING)
+ continue
+
+ ctxt['sysctl'][key] = d[2] or None
+
+ # Translate for python3
+ return {'sysctl_settings':
+ [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+ """An audit check for sysctl settings."""
+ def __init__(self):
+ self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+ super(SysctlConf, self).__init__(self.conffile,
+ SysCtlHardeningContext(),
+ template_dir=TEMPLATES_DIR,
+ user='root', group='root',
+ mode=0o0440)
+
+ def post_write(self):
+ try:
+ subprocess.check_call(['sysctl', '-p', self.conffile])
+ except subprocess.CalledProcessError as e:
+ # NOTE: on some systems if sysctl cannot apply all settings it
+ # will return non-zero as well.
+ log("sysctl command returned an error (maybe some "
+ "keys could not be set) - %s" % (e),
+ level=WARNING)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
new file mode 100644
index 0000000..0014191
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+{% if disable_core_dump -%}
+# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
+* hard core 0
+{% endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
new file mode 100644
index 0000000..101f1e1
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
@@ -0,0 +1,7 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+{% for key, value in sysctl_settings -%}
+{{ key }}={{ value }}
+{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
new file mode 100644
index 0000000..db137d6
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
@@ -0,0 +1,349 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+#
+# /etc/login.defs - Configuration control definitions for the login package.
+#
+# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
+# If unspecified, some arbitrary (and possibly incorrect) value will
+# be assumed. All other items are optional - if not specified then
+# the described action or option will be inhibited.
+#
+# Comment lines (lines beginning with "#") and blank lines are ignored.
+#
+# Modified for Linux. --marekm
+
+# REQUIRED for useradd/userdel/usermod
+# Directory where mailboxes reside, _or_ name of file, relative to the
+# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
+# MAIL_DIR takes precedence.
+#
+# Essentially:
+# - MAIL_DIR defines the location of users mail spool files
+# (for mbox use) by appending the username to MAIL_DIR as defined
+# below.
+# - MAIL_FILE defines the location of the users mail spool files as the
+# fully-qualified filename obtained by prepending the user home
+# directory before $MAIL_FILE
+#
+# NOTE: This is no longer used for setting up users MAIL environment variable
+# which is, starting from shadow 4.0.12-1 in Debian, entirely the
+# job of the pam_mail PAM modules
+# See default PAM configuration files provided for
+# login, su, etc.
+#
+# This is a temporary situation: setting these variables will soon
+# move to /etc/default/useradd and the variables will then be
+# no more supported
+MAIL_DIR /var/mail
+#MAIL_FILE .mail
+
+#
+# Enable logging and display of /var/log/faillog login failure info.
+# This option conflicts with the pam_tally PAM module.
+#
+FAILLOG_ENAB yes
+
+#
+# Enable display of unknown usernames when login failures are recorded.
+#
+# WARNING: Unknown usernames may become world readable.
+# See #290803 and #298773 for details about how this could become a security
+# concern
+LOG_UNKFAIL_ENAB no
+
+#
+# Enable logging of successful logins
+#
+LOG_OK_LOGINS yes
+
+#
+# Enable "syslog" logging of su activity - in addition to sulog file logging.
+# SYSLOG_SG_ENAB does the same for newgrp and sg.
+#
+SYSLOG_SU_ENAB yes
+SYSLOG_SG_ENAB yes
+
+#
+# If defined, all su activity is logged to this file.
+#
+#SULOG_FILE /var/log/sulog
+
+#
+# If defined, file which maps tty line to TERM environment parameter.
+# Each line of the file is in a format something like "vt100 tty01".
+#
+#TTYTYPE_FILE /etc/ttytype
+
+#
+# If defined, login failures will be logged here in a utmp format
+# last, when invoked as lastb, will read /var/log/btmp, so...
+#
+FTMP_FILE /var/log/btmp
+
+#
+# If defined, the command name to display when running "su -". For
+# example, if this is defined as "su" then a "ps" will display the
+# command is "-su". If not defined, then "ps" would display the
+# name of the shell actually being run, e.g. something like "-sh".
+#
+SU_NAME su
+
+#
+# If defined, file which inhibits all the usual chatter during the login
+# sequence. If a full pathname, then hushed mode will be enabled if the
+# user's name or shell are found in the file. If not a full pathname, then
+# hushed mode will be enabled if the file exists in the user's home directory.
+#
+HUSHLOGIN_FILE .hushlogin
+#HUSHLOGIN_FILE /etc/hushlogins
+
+#
+# *REQUIRED* The default PATH settings, for superuser and normal users.
+#
+# (they are minimal, add the rest in the shell startup files)
+ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
+
+#
+# Terminal permissions
+#
+# TTYGROUP Login tty will be assigned this group ownership.
+# TTYPERM Login tty will be set to this permission.
+#
+# If you have a "write" program which is "setgid" to a special group
+# which owns the terminals, define TTYGROUP to the group number and
+# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
+# TTYPERM to either 622 or 600.
+#
+# In Debian /usr/bin/bsd-write or similar programs are setgid tty
+# However, the default and recommended value for TTYPERM is still 0600
+# to not allow anyone to write to anyone else console or terminal
+
+# Users can still allow other people to write them by issuing
+# the "mesg y" command.
+
+TTYGROUP tty
+TTYPERM 0600
+
+#
+# Login configuration initializations:
+#
+# ERASECHAR Terminal ERASE character ('\010' = backspace).
+# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
+# UMASK Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here : each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR 0177
+KILLCHAR 025
+UMASK {{ umask }}
+
+# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
+# If set to yes, userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user.
+USERGROUPS_ENAB yes
+
+#
+# Password aging controls:
+#
+# PASS_MAX_DAYS Maximum number of days a password may be used.
+# PASS_MIN_DAYS Minimum number of days allowed between password changes.
+# PASS_WARN_AGE Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS {{ pwd_max_age }}
+PASS_MIN_DAYS {{ pwd_min_age }}
+PASS_WARN_AGE 7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN {{ uid_min }}
+UID_MAX 60000
+# System accounts
+SYS_UID_MIN {{ sys_uid_min }}
+SYS_UID_MAX {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN {{ gid_min }}
+GID_MAX 60000
+# System accounts
+SYS_GID_MIN {{ sys_gid_min }}
+SYS_GID_MAX {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built
+# in limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES {{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT {{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone). If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT {{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD /usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names. Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE /etc/consoles
+#CONSOLE console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting). Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm. Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB no
+
+#
+# If set to MD5 , MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+# #
+# These options are now handled by PAM. Please #
+# edit the appropriate file in /etc/pam.d/ to #
+# enable the equivalents of them.
+#
+###############
+
+#MOTD_FILE
+#DIALUPS_CHECK_ENAB
+#LASTLOG_ENAB
+#MAIL_CHECK_ENAB
+#OBSCURE_CHECKS_ENAB
+#PORTTIME_CHECKS_ENAB
+#SU_WHEEL_ONLY
+#CRACKLIB_DICTPATH
+#PASS_CHANGE_TRIES
+#PASS_ALWAYS_WARN
+#ENVIRON_FILE
+#NOLOGINS_FILE
+#ISSUE_FILE
+#PASS_MIN_LEN
+#PASS_MAX_LEN
+#ULIMIT
+#ENV_HZ
+#CHFN_AUTH
+#CHSH_AUTH
+#FAIL_DELAY
+
+################# OBSOLETED #######################
+# #
+# These options are no more handled by shadow. #
+# #
+# Shadow utilities will display a warning if they #
+# still appear. #
+# #
+###################################################
+
+# CLOSE_SESSIONS
+# LOGIN_STRING
+# NO_PASSWORD_CONSOLE
+# QMAIL_DIR
+
+
+
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
new file mode 100644
index 0000000..ef0354e
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
@@ -0,0 +1,117 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# /etc/modules: kernel modules to load at boot time.
+#
+# This file contains the names of kernel modules that should be loaded
+# at boot time, one per line. Lines beginning with "#" are ignored.
+# Parameters can be specified after the module name.
+
+# Arch
+# ----
+#
+# Modules for certain builds, contains support modules and some CPU-specific optimizations.
+
+{% if arch == "x86_64" -%}
+# Optimize for x86_64 cryptographic features
+twofish-x86_64-3way
+twofish-x86_64
+aes-x86_64
+salsa20-x86_64
+blowfish-x86_64
+{% endif -%}
+
+{% if cpuVendor == "intel" -%}
+# Intel-specific optimizations
+ghash-clmulni-intel
+aesni-intel
+kvm-intel
+{% endif -%}
+
+{% if cpuVendor == "amd" -%}
+# AMD-specific optimizations
+kvm-amd
+{% endif -%}
+
+kvm
+
+
+# Crypto
+# ------
+
+# Some core modules which comprise strong cryptography.
+blowfish_common
+blowfish_generic
+ctr
+cts
+lrw
+lzo
+rmd160
+rmd256
+rmd320
+serpent
+sha512_generic
+twofish_common
+twofish_generic
+xts
+zlib
+
+
+# Drivers
+# -------
+
+# Basics
+lp
+rtc
+loop
+
+# Filesystems
+ext2
+btrfs
+
+{% if desktop_enable -%}
+# Desktop
+psmouse
+snd
+snd_ac97_codec
+snd_intel8x0
+snd_page_alloc
+snd_pcm
+snd_timer
+soundcore
+usbhid
+{% endif -%}
+
+# Lib
+# ---
+xz
+
+
+# Net
+# ---
+
+# All packets needed for netfilter rules (ie iptables, ebtables).
+ip_tables
+x_tables
+iptable_filter
+iptable_nat
+
+# Targets
+ipt_LOG
+ipt_REJECT
+
+# Modules
+xt_connlimit
+xt_tcpudp
+xt_recent
+xt_limit
+xt_conntrack
+nf_conntrack
+nf_conntrack_ipv4
+nf_defrag_ipv4
+xt_state
+nf_nat
+
+# Addons
+xt_pknock \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
new file mode 100644
index 0000000..f98d14e
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
@@ -0,0 +1,11 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+Name: passwdqc password strength enforcement
+Default: yes
+Priority: 1024
+Conflicts: cracklib
+Password-Type: Primary
+Password:
+ requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
new file mode 100644
index 0000000..fd2de79
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# Disable core dumps via soft limits for all users. Compliance to this setting
+# is voluntary and can be modified by users up to a hard limit. This setting is
+# a sane default.
+ulimit -S -c 0 > /dev/null 2>&1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
new file mode 100644
index 0000000..15b18d4
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
@@ -0,0 +1,11 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# A list of TTYs, from which root can log in
+# see `man securetty` for reference
+{% if ttys -%}
+{% for tty in ttys -%}
+{{ tty }}
+{% endfor -%}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2 b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
new file mode 100644
index 0000000..d962029
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
@@ -0,0 +1,14 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+Name: tally2 lockout after failed attempts enforcement
+Default: yes
+Priority: 1024
+Conflicts: cracklib
+Auth-Type: Primary
+Auth-Initial:
+ required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }}
+Account-Type: Primary
+Account-Initial:
+ required pam_tally2.so
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
new file mode 100644
index 0000000..277b8c7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
new file mode 100644
index 0000000..d4f0ec1
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+)
+from charmhelpers.contrib.hardening.mysql.checks import config
+
+
+def run_mysql_checks():
+ log("Starting MySQL hardening checks.", level=DEBUG)
+ checks = config.get_audits()
+ for check in checks:
+ log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+ check.ensure_compliance()
+
+ log("MySQL hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
new file mode 100644
index 0000000..3af8b89
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
@@ -0,0 +1,89 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+import subprocess
+
+from charmhelpers.core.hookenv import (
+ log,
+ WARNING,
+)
+from charmhelpers.contrib.hardening.audits.file import (
+ FilePermissionAudit,
+ DirectoryPermissionAudit,
+ TemplatedFile,
+)
+from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get MySQL hardening config audits.
+
+ :returns: dictionary of audits
+ """
+ if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
+ log("MySQL does not appear to be installed on this node - "
+ "skipping mysql hardening", level=WARNING)
+ return []
+
+ settings = utils.get_settings('mysql')
+ hardening_settings = settings['hardening']
+ my_cnf = hardening_settings['mysql-conf']
+
+ audits = [
+ FilePermissionAudit(paths=[my_cnf], user='root',
+ group='root', mode=0o0600),
+
+ TemplatedFile(hardening_settings['hardening-conf'],
+ MySQLConfContext(),
+ TEMPLATES_DIR,
+ mode=0o0750,
+ user='mysql',
+ group='root',
+ service_actions=[{'service': 'mysql',
+ 'actions': ['restart']}]),
+
+ # MySQL and Percona charms do not allow configuration of the
+ # data directory, so use the default.
+ DirectoryPermissionAudit('/var/lib/mysql',
+ user='mysql',
+ group='mysql',
+ recursive=False,
+ mode=0o755),
+
+ DirectoryPermissionAudit('/etc/mysql',
+ user='root',
+ group='root',
+ recursive=False,
+ mode=0o700),
+ ]
+
+ return audits
+
+
+class MySQLConfContext(object):
+ """Defines the set of key/value pairs to set in a mysql config file.
+
+ This context, when called, will return a dictionary containing the
+ key/value pairs of setting to specify in the
+ /etc/mysql/conf.d/hardening.cnf file.
+ """
+ def __call__(self):
+ settings = utils.get_settings('mysql')
+ # Translate for python3
+ return {'mysql_settings':
+ [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
new file mode 100644
index 0000000..8242586
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
@@ -0,0 +1,12 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+[mysqld]
+{% for setting, value in mysql_settings -%}
+{% if value == 'True' -%}
+{{ setting }}
+{% elif value != 'None' and value != None -%}
+{{ setting }} = {{ value }}
+{% endif -%}
+{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
new file mode 100644
index 0000000..277b8c7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
new file mode 100644
index 0000000..b85150d
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+)
+from charmhelpers.contrib.hardening.ssh.checks import config
+
+
+def run_ssh_checks():
+ log("Starting SSH hardening checks.", level=DEBUG)
+ checks = config.get_audits()
+ for check in checks:
+ log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
+ check.ensure_compliance()
+
+ log("SSH hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
new file mode 100644
index 0000000..3fb6ae8
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -0,0 +1,394 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+)
+from charmhelpers.fetch import (
+ apt_install,
+ apt_update,
+)
+from charmhelpers.core.host import lsb_release
+from charmhelpers.contrib.hardening.audits.file import (
+ TemplatedFile,
+ FileContentAudit,
+)
+from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+ """Get SSH hardening config audits.
+
+ :returns: dictionary of audits
+ """
+ audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+ SSHDConfigFileContentAudit()]
+ return audits
+
+
+class SSHConfigContext(object):
+
+ type = 'client'
+
+ def get_macs(self, allow_weak_mac):
+ if allow_weak_mac:
+ weak_macs = 'weak'
+ else:
+ weak_macs = 'default'
+
+ default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+ macs = {'default': default,
+ 'weak': default + ',hmac-sha1'}
+
+ default = ('hmac-sha2-512-etm@openssh.com,'
+ 'hmac-sha2-256-etm@openssh.com,'
+ 'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+ 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+ macs_66 = {'default': default,
+ 'weak': default + ',hmac-sha1'}
+
+ # Use newer ciphers on Ubuntu Trusty and above
+ if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+ macs = macs_66
+
+ return macs[weak_macs]
+
+ def get_kexs(self, allow_weak_kex):
+ if allow_weak_kex:
+ weak_kex = 'weak'
+ else:
+ weak_kex = 'default'
+
+ default = 'diffie-hellman-group-exchange-sha256'
+ weak = (default + ',diffie-hellman-group14-sha1,'
+ 'diffie-hellman-group-exchange-sha1,'
+ 'diffie-hellman-group1-sha1')
+ kex = {'default': default,
+ 'weak': weak}
+
+ default = ('curve25519-sha256@libssh.org,'
+ 'diffie-hellman-group-exchange-sha256')
+ weak = (default + ',diffie-hellman-group14-sha1,'
+ 'diffie-hellman-group-exchange-sha1,'
+ 'diffie-hellman-group1-sha1')
+ kex_66 = {'default': default,
+ 'weak': weak}
+
+ # Use newer kex on Ubuntu Trusty and above
+ if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ log('Detected Ubuntu 14.04 or newer, using new key exchange '
+ 'algorithms', level=DEBUG)
+ kex = kex_66
+
+ return kex[weak_kex]
+
+ def get_ciphers(self, cbc_required):
+ if cbc_required:
+ weak_ciphers = 'weak'
+ else:
+ weak_ciphers = 'default'
+
+ default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+ cipher = {'default': default,
+ 'weak': default + 'aes256-cbc,aes192-cbc,aes128-cbc'}
+
+ default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+ 'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+ ciphers_66 = {'default': default,
+ 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+ # Use newer ciphers on ubuntu Trusty and above
+ if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ log('Detected Ubuntu 14.04 or newer, using new ciphers',
+ level=DEBUG)
+ cipher = ciphers_66
+
+ return cipher[weak_ciphers]
+
+ def __call__(self):
+ settings = utils.get_settings('ssh')
+ if settings['common']['network_ipv6_enable']:
+ addr_family = 'any'
+ else:
+ addr_family = 'inet'
+
+ ctxt = {
+ 'addr_family': addr_family,
+ 'remote_hosts': settings['common']['remote_hosts'],
+ 'password_auth_allowed':
+ settings['client']['password_authentication'],
+ 'ports': settings['common']['ports'],
+ 'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+ 'macs': self.get_macs(settings['client']['weak_hmac']),
+ 'kexs': self.get_kexs(settings['client']['weak_kex']),
+ 'roaming': settings['client']['roaming'],
+ }
+ return ctxt
+
+
+class SSHConfig(TemplatedFile):
+ def __init__(self):
+ path = '/etc/ssh/ssh_config'
+ super(SSHConfig, self).__init__(path=path,
+ template_dir=TEMPLATES_DIR,
+ context=SSHConfigContext(),
+ user='root',
+ group='root',
+ mode=0o0644)
+
+ def pre_write(self):
+ settings = utils.get_settings('ssh')
+ apt_update(fatal=True)
+ apt_install(settings['client']['package'])
+ if not os.path.exists('/etc/ssh'):
+ os.makedir('/etc/ssh')
+ # NOTE: don't recurse
+ utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+ maxdepth=0)
+
+ def post_write(self):
+ # NOTE: don't recurse
+ utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+ maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+ type = 'server'
+
+ def __call__(self):
+ settings = utils.get_settings('ssh')
+ if settings['common']['network_ipv6_enable']:
+ addr_family = 'any'
+ else:
+ addr_family = 'inet'
+
+ ctxt = {
+ 'ssh_ip': settings['server']['listen_to'],
+ 'password_auth_allowed':
+ settings['server']['password_authentication'],
+ 'ports': settings['common']['ports'],
+ 'addr_family': addr_family,
+ 'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+ 'macs': self.get_macs(settings['server']['weak_hmac']),
+ 'kexs': self.get_kexs(settings['server']['weak_kex']),
+ 'host_key_files': settings['server']['host_key_files'],
+ 'allow_root_with_key': settings['server']['allow_root_with_key'],
+ 'password_authentication':
+ settings['server']['password_authentication'],
+ 'use_priv_sep': settings['server']['use_privilege_separation'],
+ 'use_pam': settings['server']['use_pam'],
+ 'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+ 'print_motd': settings['server']['print_motd'],
+ 'print_last_log': settings['server']['print_last_log'],
+ 'client_alive_interval':
+ settings['server']['alive_interval'],
+ 'client_alive_count': settings['server']['alive_count'],
+ 'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+ 'allow_agent_forwarding':
+ settings['server']['allow_agent_forwarding'],
+ 'deny_users': settings['server']['deny_users'],
+ 'allow_users': settings['server']['allow_users'],
+ 'deny_groups': settings['server']['deny_groups'],
+ 'allow_groups': settings['server']['allow_groups'],
+ 'use_dns': settings['server']['use_dns'],
+ 'sftp_enable': settings['server']['sftp_enable'],
+ 'sftp_group': settings['server']['sftp_group'],
+ 'sftp_chroot': settings['server']['sftp_chroot'],
+ 'max_auth_tries': settings['server']['max_auth_tries'],
+ 'max_sessions': settings['server']['max_sessions'],
+ }
+ return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+ def __init__(self):
+ path = '/etc/ssh/sshd_config'
+ super(SSHDConfig, self).__init__(path=path,
+ template_dir=TEMPLATES_DIR,
+ context=SSHDConfigContext(),
+ user='root',
+ group='root',
+ mode=0o0600,
+ service_actions=[{'service': 'ssh',
+ 'actions':
+ ['restart']}])
+
+ def pre_write(self):
+ settings = utils.get_settings('ssh')
+ apt_update(fatal=True)
+ apt_install(settings['server']['package'])
+ if not os.path.exists('/etc/ssh'):
+ os.makedir('/etc/ssh')
+ # NOTE: don't recurse
+ utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+ maxdepth=0)
+
+ def post_write(self):
+ # NOTE: don't recurse
+ utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+ maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+ def __init__(self):
+ self.path = '/etc/ssh/ssh_config'
+ super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+ def is_compliant(self, *args, **kwargs):
+ self.pass_cases = []
+ self.fail_cases = []
+ settings = utils.get_settings('ssh')
+
+ if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ if not settings['server']['weak_hmac']:
+ self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+ else:
+ self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+ if settings['server']['weak_kex']:
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+ else:
+ self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
+ self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
+
+ if settings['server']['cbc_required']:
+ self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
+ self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+ self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ if not settings['client']['weak_hmac']:
+ self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+ else:
+ self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+ if settings['client']['weak_kex']:
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+ else:
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+
+ if settings['client']['cbc_required']:
+ self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+ if settings['client']['roaming']:
+ self.pass_cases.append(r'^UseRoaming yes$')
+ else:
+ self.fail_cases.append(r'^UseRoaming yes$')
+
+ return super(SSHConfigFileContentAudit, self).is_compliant(*args,
+ **kwargs)
+
+
+class SSHDConfigFileContentAudit(FileContentAudit):
+ def __init__(self):
+ self.path = '/etc/ssh/sshd_config'
+ super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
+
+ def is_compliant(self, *args, **kwargs):
+ self.pass_cases = []
+ self.fail_cases = []
+ settings = utils.get_settings('ssh')
+
+ if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ if not settings['server']['weak_hmac']:
+ self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+ else:
+ self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+ if settings['server']['weak_kex']:
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+ else:
+ self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
+ self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
+
+ if settings['server']['cbc_required']:
+ self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
+ self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+ self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ if not settings['server']['weak_hmac']:
+ self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+ else:
+ self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+ if settings['server']['weak_kex']:
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+ else:
+ self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
+ self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
+
+ if settings['server']['cbc_required']:
+ self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+ else:
+ self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+ self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+
+ if settings['server']['sftp_enable']:
+ self.pass_cases.append(r'^Subsystem\ssftp')
+ else:
+ self.fail_cases.append(r'^Subsystem\ssftp')
+
+ return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
+ **kwargs)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
new file mode 100644
index 0000000..9742d8e
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
@@ -0,0 +1,70 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# This is the ssh client system-wide configuration file. See
+# ssh_config(5) for more information. This file provides defaults for
+# users, and the values can be changed in per-user configuration files
+# or on the command line.
+
+# Configuration data is parsed as follows:
+# 1. command line options
+# 2. user-specific file
+# 3. system-wide file
+# Any configuration value is only changed the first time it is set.
+# Thus, host-specific definitions should be at the beginning of the
+# configuration file, and defaults at the end.
+
+# Site-wide defaults for some commonly used options. For a comprehensive
+# list of available options, their meanings and defaults, please see the
+# ssh_config(5) man page.
+
+# Restrict the following configuration to be limited to this Host.
+{% if remote_hosts -%}
+Host {{ ' '.join(remote_hosts) }}
+{% endif %}
+ForwardAgent no
+ForwardX11 no
+ForwardX11Trusted yes
+RhostsRSAAuthentication no
+RSAAuthentication yes
+PasswordAuthentication {{ password_auth_allowed }}
+HostbasedAuthentication no
+GSSAPIAuthentication no
+GSSAPIDelegateCredentials no
+GSSAPIKeyExchange no
+GSSAPITrustDNS no
+BatchMode no
+CheckHostIP yes
+AddressFamily {{ addr_family }}
+ConnectTimeout 0
+StrictHostKeyChecking ask
+IdentityFile ~/.ssh/identity
+IdentityFile ~/.ssh/id_rsa
+IdentityFile ~/.ssh/id_dsa
+# The port at the destination should be defined
+{% for port in ports -%}
+Port {{ port }}
+{% endfor %}
+Protocol 2
+Cipher 3des
+{% if ciphers -%}
+Ciphers {{ ciphers }}
+{%- endif %}
+{% if macs -%}
+MACs {{ macs }}
+{%- endif %}
+{% if kexs -%}
+KexAlgorithms {{ kexs }}
+{%- endif %}
+EscapeChar ~
+Tunnel no
+TunnelDevice any:any
+PermitLocalCommand no
+VisualHostKey no
+RekeyLimit 1G 1h
+SendEnv LANG LC_*
+HashKnownHosts yes
+{% if roaming -%}
+UseRoaming {{ roaming }}
+{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
new file mode 100644
index 0000000..5f87298
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
@@ -0,0 +1,159 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+# Package generated configuration file
+# See the sshd_config(5) manpage for details
+
+# What ports, IPs and protocols we listen for
+{% for port in ports -%}
+Port {{ port }}
+{% endfor -%}
+AddressFamily {{ addr_family }}
+# Use these options to restrict which interfaces/protocols sshd will bind to
+{% if ssh_ip -%}
+{% for ip in ssh_ip -%}
+ListenAddress {{ ip }}
+{% endfor %}
+{%- else -%}
+ListenAddress ::
+ListenAddress 0.0.0.0
+{% endif -%}
+Protocol 2
+{% if ciphers -%}
+Ciphers {{ ciphers }}
+{% endif -%}
+{% if macs -%}
+MACs {{ macs }}
+{% endif -%}
+{% if kexs -%}
+KexAlgorithms {{ kexs }}
+{% endif -%}
+# HostKeys for protocol version 2
+{% for keyfile in host_key_files -%}
+HostKey {{ keyfile }}
+{% endfor -%}
+
+# Privilege Separation is turned on for security
+{% if use_priv_sep -%}
+UsePrivilegeSeparation {{ use_priv_sep }}
+{% endif -%}
+
+# Lifetime and size of ephemeral version 1 server key
+KeyRegenerationInterval 3600
+ServerKeyBits 1024
+
+# Logging
+SyslogFacility AUTH
+LogLevel VERBOSE
+
+# Authentication:
+LoginGraceTime 30s
+{% if allow_root_with_key -%}
+PermitRootLogin without-password
+{% else -%}
+PermitRootLogin no
+{% endif %}
+PermitTunnel no
+PermitUserEnvironment no
+StrictModes yes
+
+RSAAuthentication yes
+PubkeyAuthentication yes
+AuthorizedKeysFile %h/.ssh/authorized_keys
+
+# Don't read the user's ~/.rhosts and ~/.shosts files
+IgnoreRhosts yes
+# For this to work you will also need host keys in /etc/ssh_known_hosts
+RhostsRSAAuthentication no
+# similar for protocol version 2
+HostbasedAuthentication no
+# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
+IgnoreUserKnownHosts yes
+
+# To enable empty passwords, change to yes (NOT RECOMMENDED)
+PermitEmptyPasswords no
+
+# Change to yes to enable challenge-response passwords (beware issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication {{ password_authentication }}
+
+# Kerberos options
+KerberosAuthentication no
+KerberosGetAFSToken no
+KerberosOrLocalPasswd no
+KerberosTicketCleanup yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+
+X11Forwarding {{ allow_x11_forwarding }}
+X11DisplayOffset 10
+X11UseLocalhost yes
+GatewayPorts no
+PrintMotd {{ print_motd }}
+PrintLastLog {{ print_last_log }}
+TCPKeepAlive no
+UseLogin no
+
+ClientAliveInterval {{ client_alive_interval }}
+ClientAliveCountMax {{ client_alive_count }}
+AllowTcpForwarding {{ allow_tcp_forwarding }}
+AllowAgentForwarding {{ allow_agent_forwarding }}
+
+MaxStartups 10:30:100
+#Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 0000000..d2ab7dc
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,71 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ WARNING,
+)
+
+try:
+ from jinja2 import FileSystemLoader, Environment
+except ImportError:
+ from charmhelpers.fetch import apt_install
+ from charmhelpers.fetch import apt_update
+ apt_update(fatal=True)
+ apt_install('python-jinja2', fatal=True)
+ from jinja2 import FileSystemLoader, Environment
+
+
+# NOTE: function separated from main rendering code to facilitate easier
+# mocking in unit tests.
+def write(path, data):
+ with open(path, 'wb') as out:
+ out.write(data)
+
+
+def get_template_path(template_dir, path):
+ """Returns the template file which would be used to render the path.
+
+ The path to the template file is returned.
+ :param template_dir: the directory the templates are located in
+ :param path: the file path to be written to.
+ :returns: path to the template file
+ """
+ return os.path.join(template_dir, os.path.basename(path))
+
+
+def render_and_write(template_dir, path, context):
+ """Renders the specified template into the file.
+
+ :param template_dir: the directory to load the template from
+ :param path: the path to write the templated contents to
+ :param context: the parameters to pass to the rendering engine
+ """
+ env = Environment(loader=FileSystemLoader(template_dir))
+ template_file = os.path.basename(path)
+ template = env.get_template(template_file)
+ log('Rendering from template: %s' % template.name, level=DEBUG)
+ rendered_content = template.render(context)
+ if not rendered_content:
+ log("Render returned None - skipping '%s'" % path,
+ level=WARNING)
+ return
+
+ write(path, rendered_content.encode('utf-8').strip())
+ log('Wrote template %s' % path, level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
new file mode 100644
index 0000000..a6743a4
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
@@ -0,0 +1,157 @@
+# Copyright 2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import grp
+import os
+import pwd
+import six
+import yaml
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+
+
+# Global settings cache. Since each hook fire entails a fresh module import it
+# is safe to hold this in memory and not risk missing config changes (since
+# they will result in a new hook fire and thus re-import).
+__SETTINGS__ = {}
+
+
+def _get_defaults(modules):
+ """Load the default config for the provided modules.
+
+ :param modules: stack modules config defaults to lookup.
+ :returns: modules default config dictionary.
+ """
+ default = os.path.join(os.path.dirname(__file__),
+ 'defaults/%s.yaml' % (modules))
+ return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+ """Load the config schema for the provided modules.
+
+ NOTE: this schema is intended to have 1-1 relationship with they keys in
+ the default config and is used a means to verify valid overrides provided
+ by the user.
+
+ :param modules: stack modules config schema to lookup.
+ :returns: modules default schema dictionary.
+ """
+ schema = os.path.join(os.path.dirname(__file__),
+ 'defaults/%s.yaml.schema' % (modules))
+ return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+ """Load user-provided config overrides.
+
+ :param modules: stack modules to lookup in user overrides yaml file.
+ :returns: overrides dictionary.
+ """
+ overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+ 'hardening.yaml')
+ if os.path.exists(overrides):
+ log("Found user-provided config overrides file '%s'" %
+ (overrides), level=DEBUG)
+ settings = yaml.safe_load(open(overrides))
+ if settings and settings.get(modules):
+ log("Applying '%s' overrides" % (modules), level=DEBUG)
+ return settings.get(modules)
+
+ log("No overrides found for '%s'" % (modules), level=DEBUG)
+ else:
+ log("No hardening config overrides file '%s' found in charm "
+ "root dir" % (overrides), level=DEBUG)
+
+ return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+ """Get overrides config overlayed onto modules defaults.
+
+ :param modules: require stack modules config.
+ :returns: dictionary of modules config with user overrides applied.
+ """
+ if overrides:
+ for k, v in six.iteritems(overrides):
+ if k in schema:
+ if schema[k] is None:
+ settings[k] = v
+ elif type(schema[k]) is dict:
+ settings[k] = _apply_overrides(settings[k], overrides[k],
+ schema[k])
+ else:
+ raise Exception("Unexpected type found in schema '%s'" %
+ type(schema[k]), level=ERROR)
+ else:
+ log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+ return settings
+
+
+def get_settings(modules):
+ global __SETTINGS__
+ if modules in __SETTINGS__:
+ return __SETTINGS__[modules]
+
+ schema = _get_schema(modules)
+ settings = _get_defaults(modules)
+ overrides = _get_user_provided_overrides(modules)
+ __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+ return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+ """Ensure permissions for path.
+
+ If path is a file, apply to file and return. If path is a directory,
+ apply recursively (if required) to directory contents and return.
+
+ :param user: user name
+ :param group: group name
+ :param permissions: octal permissions
+ :param maxdepth: maximum recursion depth. A negative maxdepth allows
+ infinite recursion and maxdepth=0 means no recursion.
+ :returns: None
+ """
+ if not os.path.exists(path):
+ log("File '%s' does not exist - cannot set permissions" % (path),
+ level=WARNING)
+ return
+
+ _user = pwd.getpwnam(user)
+ os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
+ os.chmod(path, permissions)
+
+ if maxdepth == 0:
+ log("Max recursion depth reached - skipping further recursion",
+ level=DEBUG)
+ return
+ elif maxdepth > 0:
+ maxdepth -= 1
+
+ if os.path.isdir(path):
+ contents = glob.glob("%s/*" % (path))
+ for c in contents:
+ ensure_permissions(c, user=user, group=group,
+ permissions=permissions, maxdepth=maxdepth)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
new file mode 100644
index 0000000..6bba07b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
@@ -0,0 +1,499 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import re
+import subprocess
+import six
+import socket
+
+from functools import partial
+
+from charmhelpers.core.hookenv import unit_get
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ log,
+ WARNING,
+)
+
+try:
+ import netifaces
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-netifaces', fatal=True)
+ import netifaces
+
+try:
+ import netaddr
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-netaddr', fatal=True)
+ import netaddr
+
+
def _validate_cidr(network):
    """Validate that network is in CIDR presentation format.

    :param network (str): e.g. '192.168.1.0/24'.
    :raises ValueError: if netaddr cannot parse the network.
    """
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
+
+
def no_ip_found_error_out(network):
    """Raise ValueError reporting that no IP was found in network(s)."""
    errmsg = ("No IP address found in network(s): %s" % network)
    raise ValueError(errmsg)
+
+
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then raise ValueError.
    :returns: matching local address as a string, the fallback, or None.
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    networks = network.split() or [network]
    for network in networks:
        _validate_cidr(network)
        network = netaddr.IPNetwork(network)
        for iface in netifaces.interfaces():
            addresses = netifaces.ifaddresses(iface)
            # Only the first configured IPv4 address on each iface is
            # considered.
            if network.version == 4 and netifaces.AF_INET in addresses:
                addr = addresses[netifaces.AF_INET][0]['addr']
                netmask = addresses[netifaces.AF_INET][0]['netmask']
                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
                if cidr in network:
                    return str(cidr.ip)

            if network.version == 6 and netifaces.AF_INET6 in addresses:
                for addr in addresses[netifaces.AF_INET6]:
                    # Skip link-local (fe80::/10) addresses.
                    if not addr['addr'].startswith('fe80'):
                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                            addr['netmask']))
                        if cidr in network:
                            return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
+
+
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not.

    :param address (str): candidate IP address (or hostname).
    :returns bool: True only if address parses as IPv6.
    """
    try:
        address = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False

    return address.version == 6
+
+
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :raises ValueError: if either argument cannot be parsed.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    # The membership test already yields a bool; no if/else needed.
    return address in network
+
+
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        # Only the first configured IPv4 address per iface is considered.
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::/10) addresses.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # For IPv6 report the prefix length rather than
                            # the raw netifaces netmask value.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
+
+
# Convenience partials of _get_for_address for the common lookups.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
def resolve_network_cidr(ip_address):
    '''
    Resolves the full address cidr of an ip_address based on
    configured network interfaces

    :param ip_address (str): an address configured on this host.
    :returns str: network in CIDR format, e.g. '192.168.1.0/24'.
    '''
    # NOTE(review): get_netmask_for_address() returns None when the address
    # is not bound to a local interface, in which case netaddr raises.
    # Confirm callers only pass locally-configured addresses.
    netmask = get_netmask_for_address(ip_address)
    return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.

    :param address (str): candidate address.
    :returns: '[address]' if IPv6, otherwise None.
    """
    if is_ipv6(address):
        return "[%s]" % address

    return None
+
+
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: network interface on which address(es) are expected to
        be found.
    :param inet_type: inet address family
    :param inc_aliases: include alias interfaces in search
    :param fatal: if True, raise exception if address not found
    :param exc_list: list of addresses to ignore
    :return: list of ip addresses
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        # Alias interfaces are named '<iface>:<n>', e.g. 'eth0:1'.
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
+
+
# IPv4 flavour of get_iface_addr().
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
+
+
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured.

    :param addr (str): IPv4 or IPv6 address (a '%<iface>' link-local scope
        suffix on the configured address is ignored when comparing).
    :returns str: interface name.
    :raises Exception: if the address is not configured on any interface.
    """
    # Strips a link-local scope suffix, e.g. 'fe80::1%eth0' -> 'fe80::1'.
    # Compiled once here instead of once per address inspected.
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
+
+
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name/docstring
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer
+
+
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param iface: network interface on which ipv6 address(es) are expected to
        be found.
    :param inc_aliases: include alias interfaces in search
    :param fatal: if True, raise exception if address not found
    :param exc_list: list of addresses to ignore
    :param dynamic_only: only recognise dynamic addresses
    :return: list of ipv6 addresses
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        # EUI-64 suffix of the iface's link-local address (if any); used
        # below to recognise SLAAC-derived global addresses.
        eui_64_mac = None
        # Hoisted out of the loop - the pattern is loop-invariant.
        key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
        for addr in addresses:
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # Bugfix: previously eui_64_mac could be unbound
                            # (NameError) when no link-local address had been
                            # seen; treat that case as a non-match instead.
                            if (not dynamic_only or
                                    (eui_64_mac and
                                     addr.endswith(eui_64_mac))):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
+
+
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system.

    A device is considered a bridge when sysfs exposes a 'bridge'
    subdirectory for it.
    """
    b_regex = "%s/*/bridge" % vnic_dir
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system.

    Reads the bridge's sysfs 'brif' directory; each entry is an
    enslaved interface.
    """
    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
+
+
def is_ip(address):
    """
    Returns True if address is a valid IP address.

    Accepts both IPv4 and IPv6 literals; the original implementation
    only recognised IPv4, so e.g. '::1' fell through to DNS resolution
    in get_host_ip()/get_hostname().
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
        return True
    except socket.error:
        pass

    try:
        # Test to see if it is an IPv6 literal
        socket.inet_pton(socket.AF_INET6, address)
        return True
    except (socket.error, ValueError):
        return False
+
+
def ns_query(address):
    """Resolve address via DNS.

    :param address: a dns.name.Name (queried as a 'PTR' record) or a
        string (queried as an 'A' record).
    :returns: first answer as a string, or None for unsupported types.
    """
    try:
        import dns.resolver
    except ImportError:
        # NOTE(review): best-effort install - no fatal=True and no prior
        # apt_update, unlike the module-level installs above; confirm
        # this is intentional.
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    # NOTE(review): dns.resolver.query raises (e.g. NXDOMAIN) on lookup
    # failure rather than returning an empty answer; callers such as
    # get_host_ip appear to expect a falsy return - verify.
    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
+
+
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    :param hostname (str): hostname or IP address.
    :param fallback: value to return if resolution fails.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except socket.error:
            # Narrowed from a bare except: resolution failures raise
            # socket.gaierror/herror, both subclasses of socket.error.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
+
+
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname.
    :param fqdn: if True return the fully-qualified name, otherwise just
        the short host name.
    :returns: hostname string, or None when reverse resolution fails.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except socket.error:
                # Narrowed from a bare except: reverse lookups raise
                # socket.herror/gaierror, subclasses of socket.error.
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
+
+
def port_has_listener(address, port):
    """
    Returns True if the address:port is open and being listened to,
    else False.

    @param address: an IP address or hostname
    @param port: integer port

    Note calls 'nc' via a subprocess shell
    """
    cmd = ['nc', '-z', address, str(port)]
    result = subprocess.call(cmd)
    return not(bool(result))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 0000000..ef77caf
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,33 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+''' Helper for managing alternatives for file conflict resolution '''
+
+import subprocess
+import shutil
+import os
+
+
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration

    :param name: generic name of the link group
    :param target: path of the symlink managed by update-alternatives
    :param source: path of the real file the link should point at
    :param priority: alternative priority (highest wins in auto mode)
    '''
    if (os.path.exists(target) and not os.path.islink(target)):
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    cmd = [
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority)
    ]
    subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
new file mode 100644
index 0000000..d21c9c7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -0,0 +1,304 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import re
+import sys
+import six
+from collections import OrderedDict
+from charmhelpers.contrib.amulet.deployment import (
+ AmuletDeployment
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services."""

        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']

        # Force these charms to current series even when using an older series.
        # ie. Use trusty/nrpe even when series is precise, as the P charm
        # does not possess the necessary external master config and hooks.
        force_series_current = ['nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        for svc in other_services:
            if svc['name'] in force_series_current:
                base_series = self.current_next
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(base_series,
                                              svc['name'])
            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(base_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
                     'cinder-backup', 'nexentaedge-data',
                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
                     'cinder-nexentaedge', 'nexentaedge-mgmt']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded.  Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

          Wait for all units to reach this status (exact match):
              message = re.compile('^Unit is ready and clustered$')

          Wait for all units to reach any one of these (exact match):
              message = re.compile('Unit is ready|OK|Ready')

          Wait for at least one unit to reach this status (exact match):
              message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjunction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjunction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # NOTE(review): re._pattern_type was removed in Python 3.7;
            # if this code ever runs on a newer interpreter this check
            # needs updating (e.g. type(re.compile(''))).
            if isinstance(message, re._pattern_type):
                match = message.pattern
            else:
                match = message

            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty, self.trusty_mitaka,
         self.xenial_mitaka) = range(14)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty,
            ('xenial', None): self.xenial_mitaka}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
            ('xenial', 'mitaka'),
        ])
        if self.openstack:
            # e.g. 'cloud:trusty-liberty' -> 'liberty'
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
new file mode 100644
index 0000000..ef3bdcc
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
@@ -0,0 +1,1012 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import amulet
+import json
+import logging
+import os
+import re
+import six
+import time
+import urllib
+
+import cinderclient.v1.client as cinder_client
+import glanceclient.v1.client as glance_client
+import heatclient.v1.client as heat_client
+import keystoneclient.v2_0 as keystone_client
+from keystoneclient.auth.identity import v3 as keystone_id_v3
+from keystoneclient import session as keystone_session
+from keystoneclient.v3 import client as keystone_client_v3
+
+import novaclient.client as nova_client
+import pika
+import swiftclient
+
+from charmhelpers.contrib.amulet.utils import (
+ AmuletUtils
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+NOVA_CLIENT_VERSION = "2"
+
+
+class OpenStackAmuletUtils(AmuletUtils):
+ """OpenStack amulet utilities.
+
+ This class inherits from AmuletUtils and has additional support
+ that is specifically for use by OpenStack charm tests.
+ """
+
+ def __init__(self, log_level=ERROR):
+ """Initialize the deployment environment."""
+ super(OpenStackAmuletUtils, self).__init__(log_level)
+
+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected):
+ """Validate endpoint data.
+
+ Validate actual endpoint data vs expected endpoint data. The ports
+ are used to find the matching endpoint.
+ """
+ self.log.debug('Validating endpoint data...')
+ self.log.debug('actual: {}'.format(repr(endpoints)))
+ found = False
+ for ep in endpoints:
+ self.log.debug('endpoint: {}'.format(repr(ep)))
+ if (admin_port in ep.adminurl and
+ internal_port in ep.internalurl and
+ public_port in ep.publicurl):
+ found = True
+ actual = {'id': ep.id,
+ 'region': ep.region,
+ 'adminurl': ep.adminurl,
+ 'internalurl': ep.internalurl,
+ 'publicurl': ep.publicurl,
+ 'service_id': ep.service_id}
+ ret = self._validate_dict_data(expected, actual)
+ if ret:
+ return 'unexpected endpoint data - {}'.format(ret)
+
+ if not found:
+ return 'endpoint not found'
+
+ def validate_svc_catalog_endpoint_data(self, expected, actual):
+ """Validate service catalog endpoint data.
+
+ Validate a list of actual service catalog endpoints vs a list of
+ expected service catalog endpoints.
+ """
+ self.log.debug('Validating service catalog endpoint data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for k, v in six.iteritems(expected):
+ if k in actual:
+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
+ if ret:
+ return self.endpoint_error(k, ret)
+ else:
+ return "endpoint {} does not exist".format(k)
+ return ret
+
+ def validate_tenant_data(self, expected, actual):
+ """Validate tenant data.
+
+ Validate a list of actual tenant data vs list of expected tenant
+ data.
+ """
+ self.log.debug('Validating tenant data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'enabled': act.enabled, 'description': act.description,
+ 'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected tenant data - {}".format(ret)
+ if not found:
+ return "tenant {} does not exist".format(e['name'])
+ return ret
+
+ def validate_role_data(self, expected, actual):
+ """Validate role data.
+
+ Validate a list of actual role data vs a list of expected role
+ data.
+ """
+ self.log.debug('Validating role data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected role data - {}".format(ret)
+ if not found:
+ return "role {} does not exist".format(e['name'])
+ return ret
+
+ def validate_user_data(self, expected, actual, api_version=None):
+ """Validate user data.
+
+ Validate a list of actual user data vs a list of expected user
+ data.
+ """
+ self.log.debug('Validating user data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ if e['name'] == act.name:
+ a = {'enabled': act.enabled, 'name': act.name,
+ 'email': act.email, 'id': act.id}
+ if api_version == 3:
+ a['default_project_id'] = getattr(act,
+ 'default_project_id',
+ 'none')
+ else:
+ a['tenantId'] = act.tenantId
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected user data - {}".format(ret)
+ if not found:
+ return "user {} does not exist".format(e['name'])
+ return ret
+
+ def validate_flavor_data(self, expected, actual):
+ """Validate flavor data.
+
+ Validate a list of actual flavors vs a list of expected flavors.
+ """
+ self.log.debug('Validating flavor data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ act = [a.name for a in actual]
+ return self._validate_list_data(expected, act)
+
+    def tenant_exists(self, keystone, tenant):
+        """Return True if tenant exists.
+
+        :param keystone: authenticated keystone client
+        :param tenant: tenant name string to look for
+        """
+        self.log.debug('Checking if tenant exists ({})...'.format(tenant))
+        return tenant in [t.name for t in keystone.tenants.list()]
+
+ def authenticate_cinder_admin(self, keystone_sentry, username,
+ password, tenant):
+ """Authenticates admin user with cinder."""
+ # NOTE(beisner): cinder python client doesn't accept tokens.
+ service_ip = \
+ keystone_sentry.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
+ return cinder_client.Client(username, password, tenant, ept)
+
+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
+ tenant=None, api_version=None,
+ keystone_ip=None):
+ """Authenticates admin user with the keystone admin endpoint."""
+ self.log.debug('Authenticating keystone admin...')
+ unit = keystone_sentry
+ if not keystone_ip:
+ keystone_ip = unit.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
+ if not api_version or api_version == 2:
+ ep = base_ep + "/v2.0"
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+ else:
+ ep = base_ep + "/v3"
+ auth = keystone_id_v3.Password(
+ user_domain_name='admin_domain',
+ username=user,
+ password=password,
+ domain_name='admin_domain',
+ auth_url=ep,
+ )
+ sess = keystone_session.Session(auth=auth)
+ return keystone_client_v3.Client(session=sess)
+
+ def authenticate_keystone_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with the keystone public endpoint."""
+ self.log.debug('Authenticating keystone user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+
+ def authenticate_glance_admin(self, keystone):
+ """Authenticates admin user with glance."""
+ self.log.debug('Authenticating glance admin...')
+ ep = keystone.service_catalog.url_for(service_type='image',
+ endpoint_type='adminURL')
+ return glance_client.Client(ep, token=keystone.auth_token)
+
+ def authenticate_heat_admin(self, keystone):
+ """Authenticates the admin user with heat."""
+ self.log.debug('Authenticating heat admin...')
+ ep = keystone.service_catalog.url_for(service_type='orchestration',
+ endpoint_type='publicURL')
+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
+
+ def authenticate_nova_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with nova-api."""
+ self.log.debug('Authenticating nova user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return nova_client.Client(NOVA_CLIENT_VERSION,
+ username=user, api_key=password,
+ project_id=tenant, auth_url=ep)
+
+ def authenticate_swift_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with swift api."""
+ self.log.debug('Authenticating swift user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return swiftclient.Connection(authurl=ep,
+ user=user,
+ key=password,
+ tenant_name=tenant,
+ auth_version='2.0')
+
+ def create_cirros_image(self, glance, image_name):
+ """Download the latest cirros image and upload it to glance,
+ validate and return a resource pointer.
+
+ :param glance: pointer to authenticated glance connection
+ :param image_name: display name for new image
+ :returns: glance image pointer
+ """
+ self.log.debug('Creating glance cirros image '
+ '({})...'.format(image_name))
+
+ # Download cirros image
+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+ if http_proxy:
+ proxies = {'http': http_proxy}
+ opener = urllib.FancyURLopener(proxies)
+ else:
+ opener = urllib.FancyURLopener()
+
+ f = opener.open('http://download.cirros-cloud.net/version/released')
+ version = f.read().strip()
+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+ local_path = os.path.join('tests', cirros_img)
+
+ if not os.path.exists(local_path):
+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+ version, cirros_img)
+ opener.retrieve(cirros_url, local_path)
+ f.close()
+
+ # Create glance image
+ with open(local_path) as f:
+ image = glance.images.create(name=image_name, is_public=True,
+ disk_format='qcow2',
+ container_format='bare', data=f)
+
+ # Wait for image to reach active status
+ img_id = image.id
+ ret = self.resource_reaches_status(glance.images, img_id,
+ expected_stat='active',
+ msg='Image status wait')
+ if not ret:
+ msg = 'Glance image failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new image
+ self.log.debug('Validating image attributes...')
+ val_img_name = glance.images.get(img_id).name
+ val_img_stat = glance.images.get(img_id).status
+ val_img_pub = glance.images.get(img_id).is_public
+ val_img_cfmt = glance.images.get(img_id).container_format
+ val_img_dfmt = glance.images.get(img_id).disk_format
+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+ 'container fmt:{} disk fmt:{}'.format(
+ val_img_name, val_img_pub, img_id,
+ val_img_stat, val_img_cfmt, val_img_dfmt))
+
+ if val_img_name == image_name and val_img_stat == 'active' \
+ and val_img_pub is True and val_img_cfmt == 'bare' \
+ and val_img_dfmt == 'qcow2':
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return image
+
+ def delete_image(self, glance, image):
+ """Delete the specified image."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_image.')
+ self.log.debug('Deleting glance image ({})...'.format(image))
+ return self.delete_resource(glance.images, image, msg='glance image')
+
+ def create_instance(self, nova, image_name, instance_name, flavor):
+ """Create the specified instance."""
+ self.log.debug('Creating instance '
+ '({}|{}|{})'.format(instance_name, image_name, flavor))
+ image = nova.images.find(name=image_name)
+ flavor = nova.flavors.find(name=flavor)
+ instance = nova.servers.create(name=instance_name, image=image,
+ flavor=flavor)
+
+ count = 1
+ status = instance.status
+ while status != 'ACTIVE' and count < 60:
+ time.sleep(3)
+ instance = nova.servers.get(instance.id)
+ status = instance.status
+ self.log.debug('instance status: {}'.format(status))
+ count += 1
+
+ if status != 'ACTIVE':
+ self.log.error('instance creation timed out')
+ return None
+
+ return instance
+
+ def delete_instance(self, nova, instance):
+ """Delete the specified instance."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_instance.')
+ self.log.debug('Deleting instance ({})...'.format(instance))
+ return self.delete_resource(nova.servers, instance,
+ msg='nova instance')
+
+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
+ """Create a new keypair, or return pointer if it already exists."""
+ try:
+ _keypair = nova.keypairs.get(keypair_name)
+ self.log.debug('Keypair ({}) already exists, '
+ 'using it.'.format(keypair_name))
+ return _keypair
+ except:
+ self.log.debug('Keypair ({}) does not exist, '
+ 'creating it.'.format(keypair_name))
+
+ _keypair = nova.keypairs.create(name=keypair_name)
+ return _keypair
+
+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+ img_id=None, src_vol_id=None, snap_id=None):
+ """Create cinder volume, optionally from a glance image, OR
+ optionally as a clone of an existing volume, OR optionally
+ from a snapshot. Wait for the new volume status to reach
+ the expected status, validate and return a resource pointer.
+
+ :param vol_name: cinder volume display name
+ :param vol_size: size in gigabytes
+ :param img_id: optional glance image id
+ :param src_vol_id: optional source volume id to clone
+ :param snap_id: optional snapshot id to use
+ :returns: cinder volume pointer
+ """
+ # Handle parameter input and avoid impossible combinations
+ if img_id and not src_vol_id and not snap_id:
+ # Create volume from image
+ self.log.debug('Creating cinder volume from glance image...')
+ bootable = 'true'
+ elif src_vol_id and not img_id and not snap_id:
+ # Clone an existing volume
+ self.log.debug('Cloning cinder volume...')
+ bootable = cinder.volumes.get(src_vol_id).bootable
+ elif snap_id and not src_vol_id and not img_id:
+ # Create volume from snapshot
+ self.log.debug('Creating cinder volume from snapshot...')
+ snap = cinder.volume_snapshots.find(id=snap_id)
+ vol_size = snap.size
+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+ bootable = cinder.volumes.get(snap_vol_id).bootable
+ elif not img_id and not src_vol_id and not snap_id:
+ # Create volume
+ self.log.debug('Creating cinder volume...')
+ bootable = 'false'
+ else:
+ # Impossible combination of parameters
+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+ img_id, src_vol_id,
+ snap_id))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Create new volume
+ try:
+ vol_new = cinder.volumes.create(display_name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
+ except Exception as e:
+ msg = 'Failed to create volume: {}'.format(e)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Wait for volume to reach available status
+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
+ expected_stat="available",
+ msg="Volume status wait")
+ if not ret:
+ msg = 'Cinder volume failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new volume
+ self.log.debug('Validating volume attributes...')
+ val_vol_name = cinder.volumes.get(vol_id).display_name
+ val_vol_boot = cinder.volumes.get(vol_id).bootable
+ val_vol_stat = cinder.volumes.get(vol_id).status
+ val_vol_size = cinder.volumes.get(vol_id).size
+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+ '{} size:{}'.format(val_vol_name, vol_id,
+ val_vol_stat, val_vol_boot,
+ val_vol_size))
+
+ if val_vol_boot == bootable and val_vol_stat == 'available' \
+ and val_vol_name == vol_name and val_vol_size == vol_size:
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return vol_new
+
+    def delete_resource(self, resource, resource_id,
+                        msg="resource", max_wait=120):
+        """Delete one openstack resource, such as one instance, keypair,
+        image, volume, stack, etc., and confirm deletion within max wait time.
+
+        Deletion is confirmed by polling until the resource list count
+        drops by exactly one.
+
+        NOTE(review): the count-based check assumes no concurrent
+        creates/deletes of the same resource type during the wait window.
+
+        :param resource: pointer to os resource type, ex:glance_client.images
+        :param resource_id: unique name or id for the openstack resource
+        :param msg: text to identify purpose in logging
+        :param max_wait: maximum wait time in seconds
+        :returns: True if successful, otherwise False
+        """
+        self.log.debug('Deleting OpenStack resource '
+                       '{} ({})'.format(resource_id, msg))
+        num_before = len(list(resource.list()))
+        resource.delete(resource_id)
+
+        # Poll every 4s until the count drops or max_wait is exhausted.
+        tries = 0
+        num_after = len(list(resource.list()))
+        while num_after != (num_before - 1) and tries < (max_wait / 4):
+            self.log.debug('{} delete check: '
+                           '{} [{}:{}] {}'.format(msg, tries,
+                                                  num_before,
+                                                  num_after,
+                                                  resource_id))
+            time.sleep(4)
+            num_after = len(list(resource.list()))
+            tries += 1
+
+        self.log.debug('{}: expected, actual count = {}, '
+                       '{}'.format(msg, num_before - 1, num_after))
+
+        if num_after == (num_before - 1):
+            return True
+        else:
+            self.log.error('{} delete timed out'.format(msg))
+            return False
+
+    def resource_reaches_status(self, resource, resource_id,
+                                expected_stat='available',
+                                msg='resource', max_wait=120):
+        """Wait for an openstack resources status to reach an
+        expected status within a specified time. Useful to confirm that
+        nova instances, cinder vols, snapshots, glance images, heat stacks
+        and other resources eventually reach the expected status.
+
+        :param resource: pointer to os resource type, ex: heat_client.stacks
+        :param resource_id: unique id for the openstack resource
+        :param expected_stat: status to expect resource to reach
+        :param msg: text to identify purpose in logging
+        :param max_wait: maximum wait time in seconds
+        :returns: True if successful, False if status is not reached
+        """
+
+        # Poll the resource's status every 4s until it matches, or until
+        # max_wait seconds have elapsed.
+        tries = 0
+        resource_stat = resource.get(resource_id).status
+        while resource_stat != expected_stat and tries < (max_wait / 4):
+            self.log.debug('{} status check: '
+                           '{} [{}:{}] {}'.format(msg, tries,
+                                                  resource_stat,
+                                                  expected_stat,
+                                                  resource_id))
+            time.sleep(4)
+            resource_stat = resource.get(resource_id).status
+            tries += 1
+
+        self.log.debug('{}: expected, actual status = {}, '
+                       '{}'.format(msg, resource_stat, expected_stat))
+
+        if resource_stat == expected_stat:
+            return True
+        else:
+            self.log.debug('{} never reached expected status: '
+                           '{}'.format(resource_id, expected_stat))
+            return False
+
+ def get_ceph_osd_id_cmd(self, index):
+ """Produce a shell command that will return a ceph-osd id."""
+ return ("`initctl list | grep 'ceph-osd ' | "
+ "awk 'NR=={} {{ print $2 }}' | "
+ "grep -o '[0-9]*'`".format(index + 1))
+
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool name as keys, pool id as vals.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :returns: dict mapping pool name (str) -> pool id (int)
+        :raises: amulet.raise_status on non-zero command exit
+        """
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        # Split on commas, then each entry into '<id> <name>'; entries
+        # that do not split into exactly two fields (e.g. the trailing
+        # empty string) are ignored.
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
+ def get_ceph_df(self, sentry_unit):
+ """Return dict of ceph df json output, including ceph pool state.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :returns: Dict of ceph df output
+ """
+ cmd = 'sudo ceph df --format=json'
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ return json.loads(output)
+
+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+ """Take a sample of attributes of a ceph pool, returning ceph
+ pool name, object count and disk space used for the specified
+ pool ID number.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :param pool_id: Ceph pool ID
+ :returns: List of pool name, object count, kb disk space used
+ """
+ df = self.get_ceph_df(sentry_unit)
+ pool_name = df['pools'][pool_id]['name']
+ obj_count = df['pools'][pool_id]['stats']['objects']
+ kb_used = df['pools'][pool_id]['stats']['kb_used']
+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
+ '{} kb used'.format(pool_name, pool_id,
+ obj_count, kb_used))
+ return pool_name, obj_count, kb_used
+
+    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+        """Validate ceph pool samples taken over time, such as pool
+        object counts or pool kb used, before adding, after adding, and
+        after deleting items which affect those pool attributes. The
+        2nd element is expected to be greater than the 1st; 3rd is expected
+        to be less than the 2nd.
+
+        :param samples: List containing 3 data samples
+        :param sample_type: String for logging and usage context
+        :returns: None if successful, Failure message otherwise
+        """
+        # Symbolic indexes into the 3-element samples list.
+        original, created, deleted = range(3)
+        # Expect strict growth after create and strict shrink after delete.
+        if samples[created] <= samples[original] or \
+                samples[deleted] >= samples[created]:
+            return ('Ceph {} samples ({}) '
+                    'unexpected.'.format(sample_type, samples))
+        else:
+            self.log.debug('Ceph {} samples (OK): '
+                           '{}'.format(sample_type, samples))
+            return None
+
+ # rabbitmq/amqp specific helpers:
+
+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+ """Wait for rmq units extended status to show cluster readiness,
+ after an optional initial sleep period. Initial sleep is likely
+ necessary to be effective following a config change, as status
+ message may not instantly update to non-ready."""
+
+ if init_sleep:
+ time.sleep(init_sleep)
+
+ message = re.compile('^Unit is ready and clustered$')
+ deployment._auto_wait_for_status(message=message,
+ timeout=timeout,
+ include_only=['rabbitmq-server'])
+
+ def add_rmq_test_user(self, sentry_units,
+ username="testuser1", password="changeme"):
+ """Add a test user via the first rmq juju unit, check connection as
+ the new user against all sentry units.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Adding rmq user ({})...'.format(username))
+
+ # Check that user does not already exist
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+ if username in output:
+ self.log.warning('User ({}) already exists, returning '
+ 'gracefully.'.format(username))
+ return
+
+ perms = '".*" ".*" ".*"'
+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+ # Add user via first unit
+ for cmd in cmds:
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+ # Check connection against the other sentry_units
+ self.log.debug('Checking user connect against units...')
+ for sentry_unit in sentry_units:
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+ username=username,
+ password=password)
+ connection.close()
+
+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+ """Delete a rabbitmq user via the first rmq juju unit.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: None if successful or no such user.
+ """
+ self.log.debug('Deleting rmq user ({})...'.format(username))
+
+ # Check that the user exists
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+ if username not in output:
+ self.log.warning('User ({}) does not exist, returning '
+ 'gracefully.'.format(username))
+ return
+
+ # Delete the user
+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+ def get_rmq_cluster_status(self, sentry_unit):
+ """Execute rabbitmq cluster status command on a unit and return
+ the full output.
+
+ :param unit: sentry unit
+ :returns: String containing console output of cluster status command
+ """
+ cmd = 'rabbitmqctl cluster_status'
+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
+ self.log.debug('{} cluster_status:\n{}'.format(
+ sentry_unit.info['unit_name'], output))
+ return str(output)
+
+    def get_rmq_cluster_running_nodes(self, sentry_unit):
+        """Parse rabbitmqctl cluster_status output string, return list of
+        running rabbitmq cluster nodes.
+
+        :param sentry_unit: sentry unit
+        :returns: List containing node names of running nodes; empty list
+            when no 'running_nodes' section is present in the output
+        """
+        # NOTE(beisner): rabbitmqctl cluster_status output is not
+        # json-parsable, do string chop foo, then json.loads that.
+        str_stat = self.get_rmq_cluster_status(sentry_unit)
+        if 'running_nodes' in str_stat:
+            # 15 == len("{running_nodes,"): skip past the Erlang tuple tag;
+            # +1 keeps the closing ']' of the node list.
+            pos_start = str_stat.find("{running_nodes,") + 15
+            pos_end = str_stat.find("]},", pos_start) + 1
+            # Erlang atoms are single-quoted; JSON needs double quotes.
+            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+            run_nodes = json.loads(str_run_nodes)
+            return run_nodes
+        else:
+            return []
+
+ def validate_rmq_cluster_running_nodes(self, sentry_units):
+ """Check that all rmq unit hostnames are represented in the
+ cluster_status output of all units.
+
+ :param host_names: dict of juju unit names to host names
+ :param units: list of sentry unit pointers (all rmq units)
+ :returns: None if successful, otherwise return error message
+ """
+ host_names = self.get_unit_hostnames(sentry_units)
+ errors = []
+
+ # Query every unit for cluster_status running nodes
+ for query_unit in sentry_units:
+ query_unit_name = query_unit.info['unit_name']
+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+ # Confirm that every unit is represented in the queried unit's
+ # cluster_status running nodes output.
+ for validate_unit in sentry_units:
+ val_host_name = host_names[validate_unit.info['unit_name']]
+ val_node_name = 'rabbit@{}'.format(val_host_name)
+
+ if val_node_name not in running_nodes:
+ errors.append('Cluster member check failed on {}: {} not '
+ 'in {}\n'.format(query_unit_name,
+ val_node_name,
+ running_nodes))
+ if errors:
+ return ''.join(errors)
+
+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+ """Check a single juju rmq unit for ssl and port in the config file."""
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ conf_file = '/etc/rabbitmq/rabbitmq.config'
+ conf_contents = str(self.file_contents_safe(sentry_unit,
+ conf_file, max_wait=16))
+ # Checks
+ conf_ssl = 'ssl' in conf_contents
+ conf_port = str(port) in conf_contents
+
+ # Port explicitly checked in config
+ if port and conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif port and not conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{} but not on port {} '
+ '({})'.format(host, port, unit_name))
+ return False
+ # Port not checked (useful when checking that ssl is disabled)
+ elif not port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif not conf_ssl:
+ self.log.debug('SSL not enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return False
+ else:
+ msg = ('Unknown condition when checking SSL status @{}:{} '
+ '({})'.format(host, port, unit_name))
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+ """Check that ssl is enabled on rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :param port: optional ssl port override to validate
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+ return ('Unexpected condition: ssl is disabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def validate_rmq_ssl_disabled_units(self, sentry_units):
+ """Check that ssl is enabled on listed rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :returns: True if successful. Raise on error.
+ """
+ for sentry_unit in sentry_units:
+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+ return ('Unexpected condition: ssl is enabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def configure_rmq_ssl_on(self, sentry_units, deployment,
+ port=None, max_wait=60):
+ """Turn ssl charm config option on, with optional non-default
+ ssl port specification. Confirm that it is enabled on every
+ unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param port: amqp port, use defaults if None
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: on')
+
+ # Enable RMQ SSL
+ config = {'ssl': 'on'}
+ if port:
+ config['ssl_port'] = port
+
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+ """Turn ssl charm config option off, confirm that it is disabled
+ on every unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: off')
+
+ # Disable RMQ SSL
+ config = {'ssl': 'off'}
+ deployment.d.configure('rabbitmq-server', config)
+
+ # Wait for unit status
+ self.rmq_wait_for_cluster(deployment)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+ port=None, fatal=True,
+ username="testuser1", password="changeme"):
+ """Establish and return a pika amqp connection to the rabbitmq service
+ running on a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :param fatal: boolean, default to True (raises on connect error)
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: pika amqp connection pointer or None if failed and non-fatal
+ """
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ # Default port logic if port is not specified
+ if ssl and not port:
+ port = 5671
+ elif not ssl and not port:
+ port = 5672
+
+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
+ '{}...'.format(host, port, unit_name, username))
+
+ try:
+ credentials = pika.PlainCredentials(username, password)
+ parameters = pika.ConnectionParameters(host=host, port=port,
+ credentials=credentials,
+ ssl=ssl,
+ connection_attempts=3,
+ retry_delay=5,
+ socket_timeout=1)
+ connection = pika.BlockingConnection(parameters)
+ assert connection.server_properties['product'] == 'RabbitMQ'
+ self.log.debug('Connect OK')
+ return connection
+ except Exception as e:
+ msg = ('amqp connection failed to {}:{} as '
+ '{} ({})'.format(host, port, username, str(e)))
+ if fatal:
+ amulet.raise_status(amulet.FAIL, msg)
+ else:
+ self.log.warn(msg)
+ return None
+
+ def publish_amqp_message_by_unit(self, sentry_unit, message,
+ queue="test", ssl=False,
+ username="testuser1",
+ password="changeme",
+ port=None):
+ """Publish an amqp message to a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param message: amqp message string
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: None. Raises exception if publish failed.
+ """
+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+ message))
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+
+ # NOTE(beisner): extra debug here re: pika hang potential:
+ # https://github.com/pika/pika/issues/297
+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+ self.log.debug('Defining channel...')
+ channel = connection.channel()
+ self.log.debug('Declaring queue...')
+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+ self.log.debug('Publishing message...')
+ channel.basic_publish(exchange='', routing_key=queue, body=message)
+ self.log.debug('Closing channel...')
+ channel.close()
+ self.log.debug('Closing connection...')
+ connection.close()
+
+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+ username="testuser1",
+ password="changeme",
+ ssl=False, port=None):
+ """Get an amqp message from a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: amqp message body as string. Raise if get fails.
+ """
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+ channel = connection.channel()
+ method_frame, _, body = channel.basic_get(queue)
+
+ if method_frame:
+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
+ body))
+ channel.basic_ack(method_frame.delivery_tag)
+ channel.close()
+ connection.close()
+ return body
+ else:
+ msg = 'No message retrieved.'
+ amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
new file mode 100644
index 0000000..c07b33d
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
@@ -0,0 +1,1583 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import json
+import os
+import re
+import time
+from base64 import b64decode
+from subprocess import check_call, CalledProcessError
+
+import six
+import yaml
+
+from charmhelpers.fetch import (
+ apt_install,
+ filter_installed_packages,
+)
+from charmhelpers.core.hookenv import (
+ config,
+ is_relation_made,
+ local_unit,
+ log,
+ relation_get,
+ relation_ids,
+ related_units,
+ relation_set,
+ unit_get,
+ unit_private_ip,
+ charm_name,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+ status_set,
+)
+
+from charmhelpers.core.sysctl import create as sysctl_create
+from charmhelpers.core.strutils import bool_from_string
+
+from charmhelpers.core.host import (
+ get_bond_master,
+ is_phy_iface,
+ list_nics,
+ get_nic_hwaddr,
+ mkdir,
+ write_file,
+ pwgen,
+)
+from charmhelpers.contrib.hahelpers.cluster import (
+ determine_apache_port,
+ determine_api_port,
+ https,
+ is_clustered,
+)
+from charmhelpers.contrib.hahelpers.apache import (
+ get_cert,
+ get_ca_cert,
+ install_ca_cert,
+)
+from charmhelpers.contrib.openstack.neutron import (
+ neutron_plugin_attribute,
+ parse_data_port_mappings,
+)
+from charmhelpers.contrib.openstack.ip import (
+ resolve_address,
+ INTERNAL,
+)
+from charmhelpers.contrib.network.ip import (
+ get_address_in_network,
+ get_ipv4_addr,
+ get_ipv6_addr,
+ get_netmask_for_address,
+ format_ipv6_addr,
+ is_address_in_network,
+ is_bridge_member,
+)
+from charmhelpers.contrib.openstack.utils import get_host_ip
+from charmhelpers.core.unitdata import kv
+
+try:
+ import psutil
+except ImportError:
+ apt_install('python-psutil', fatal=True)
+ import psutil
+
+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
+ADDRESS_TYPES = ['admin', 'internal', 'public']
+
+
+class OSContextError(Exception):
+    """Raised when a context generator cannot build its context
+    (e.g. missing or malformed charm configuration)."""
+    pass
+
+
+def ensure_packages(packages):
+    """Install but do not upgrade required plugin packages.
+
+    :param packages: list of package names; only those not already
+                     installed are passed to apt_install (fatal on error).
+    """
+    required = filter_installed_packages(packages)
+    if required:
+        apt_install(required, fatal=True)
+
+
+def context_complete(ctxt):
+    """Return True if no value in ctxt is None or empty string.
+
+    Logs the missing keys at INFO level when incomplete.  Module-level
+    variant of OSContextGenerator.context_complete (no state is kept).
+    """
+    _missing = []
+    for k, v in six.iteritems(ctxt):
+        if v is None or v == '':
+            _missing.append(k)
+
+    if _missing:
+        log('Missing required data: %s' % ' '.join(_missing), level=INFO)
+        return False
+
+    return True
+
+
+def config_flags_parser(config_flags):
+    """Parses config flags string into dict.
+
+    This parsing method supports a few different formats for the config
+    flag values to be parsed:
+
+      1. A string in the simple format of key=value pairs, with the possibility
+         of specifying multiple key value pairs within the same string. For
+         example, a string in the format of 'key1=value1, key2=value2' will
+         return a dict of:
+
+             {'key1': 'value1',
+              'key2': 'value2'}.
+
+      2. A string in the above format, but supporting a comma-delimited list
+         of values for the same key. For example, a string in the format of
+         'key1=value1, key2=value3,value4,value5' will return a dict of:
+
+             {'key1': 'value1',
+              'key2': 'value3,value4,value5'}
+
+      3. A string containing a colon character (:) prior to an equal
+         character (=) will be treated as yaml and parsed as such. This can be
+         used to specify more complex key value pairs. For example,
+         a string in the format of 'key1: subkey1=value1, subkey2=value2' will
+         return a dict of:
+
+             {'key1': 'subkey1=value1, subkey2=value2'}
+
+    The provided config_flags string may be a list of comma-separated values
+    which themselves may be comma-separated list of values.
+    """
+    # If we find a colon before an equals sign then treat it as yaml.
+    # Note: limit it to finding the colon first since this indicates assignment
+    # for inline yaml.
+    colon = config_flags.find(':')
+    equals = config_flags.find('=')
+    if colon > 0:
+        if colon < equals or equals < 0:
+            return yaml.safe_load(config_flags)
+
+    if config_flags.find('==') >= 0:
+        log("config_flags is not in expected format (key=value)", level=ERROR)
+        raise OSContextError
+
+    # strip the following from each value.
+    post_strippers = ' ,'
+    # we strip any leading/trailing '=' or ' ' from the string then
+    # split on '='.
+    split = config_flags.strip(' =').split('=')
+    limit = len(split)
+    flags = {}
+    for i in range(0, limit - 1):
+        current = split[i]
+        # NOTE(review): 'next' shadows the builtin; consider renaming to
+        # e.g. 'nxt' in a follow-up (left unchanged here).
+        next = split[i + 1]
+        # The tail of each split element (after the last comma) is the
+        # next pair's key; everything before it belongs to this value.
+        vindex = next.rfind(',')
+        if (i == limit - 2) or (vindex < 0):
+            value = next
+        else:
+            value = next[:vindex]
+
+        if i == 0:
+            key = current
+        else:
+            # if this not the first entry, expect an embedded key.
+            index = current.rfind(',')
+            if index < 0:
+                log("Invalid config value(s) at index %s" % (i), level=ERROR)
+                raise OSContextError
+            key = current[index + 1:]
+
+        # Add to collection.
+        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+    return flags
+
+
+class OSContextGenerator(object):
+    """Base class for all context generators."""
+    # Relation interface names this generator inspects.
+    interfaces = []
+    # True once at least one interface has relation ids (see get_related).
+    related = False
+    # True once context_complete() found no missing values.
+    complete = False
+    # Keys found empty/None on the last context_complete() call.
+    missing_data = []
+
+    def __call__(self):
+        # Subclasses must return a dict of template context.
+        raise NotImplementedError
+
+    def context_complete(self, ctxt):
+        """Check for missing data for the required context data.
+        Set self.missing_data if it exists and return False.
+        Set self.complete if no missing data and return True.
+        """
+        # Fresh start
+        self.complete = False
+        self.missing_data = []
+        for k, v in six.iteritems(ctxt):
+            if v is None or v == '':
+                if k not in self.missing_data:
+                    self.missing_data.append(k)
+
+        if self.missing_data:
+            self.complete = False
+            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
+        else:
+            self.complete = True
+        return self.complete
+
+    def get_related(self):
+        """Check if any of the context interfaces have relation ids.
+        Set self.related and return True if one of the interfaces
+        has relation ids.
+        """
+        # Fresh start
+        self.related = False
+        try:
+            for interface in self.interfaces:
+                if relation_ids(interface):
+                    self.related = True
+            return self.related
+        except AttributeError as e:
+            # Subclass defined no 'interfaces'; log and report unrelated.
+            log("{} {}"
+                "".format(self, e), 'INFO')
+            return self.related
+
+
+class SharedDBContext(OSContextGenerator):
+    """Context for MySQL access obtained over the shared-db interface."""
+    interfaces = ['shared-db']
+
+    def __init__(self,
+                 database=None, user=None, relation_prefix=None, ssl_dir=None):
+        """Allows inspecting relation for settings prefixed with
+        relation_prefix. This is useful for parsing access for multiple
+        databases returned via the shared-db interface (eg, nova_password,
+        quantum_password)
+        """
+        self.relation_prefix = relation_prefix
+        self.database = database
+        self.user = user
+        self.ssl_dir = ssl_dir
+        self.rel_name = self.interfaces[0]
+
+    def __call__(self):
+        # Fall back to charm config for database name/user if not given
+        # at construction time.
+        self.database = self.database or config('database')
+        self.user = self.user or config('database-user')
+        if None in [self.database, self.user]:
+            log("Could not generate shared_db context. Missing required charm "
+                "config options. (database name and user)", level=ERROR)
+            raise OSContextError
+
+        ctxt = {}
+
+        # NOTE(jamespage) if mysql charm provides a network upon which
+        # access to the database should be made, reconfigure relation
+        # with the service units local address and defer execution
+        access_network = relation_get('access-network')
+        if access_network is not None:
+            if self.relation_prefix is not None:
+                hostname_key = "{}_hostname".format(self.relation_prefix)
+            else:
+                hostname_key = "hostname"
+            access_hostname = get_address_in_network(access_network,
+                                                     unit_get('private-address'))
+            set_hostname = relation_get(attribute=hostname_key,
+                                        unit=local_unit())
+            if set_hostname != access_hostname:
+                relation_set(relation_settings={hostname_key: access_hostname})
+                return None  # Defer any further hook execution for now....
+
+        password_setting = 'password'
+        if self.relation_prefix:
+            password_setting = self.relation_prefix + '_password'
+
+        for rid in relation_ids(self.interfaces[0]):
+            self.related = True
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                host = rdata.get('db_host')
+                host = format_ipv6_addr(host) or host
+                ctxt = {
+                    'database_host': host,
+                    'database': self.database,
+                    'database_user': self.user,
+                    'database_password': rdata.get(password_setting),
+                    'database_type': 'mysql'
+                }
+                # First unit providing a complete data set wins.
+                if self.context_complete(ctxt):
+                    db_ssl(rdata, ctxt, self.ssl_dir)
+                    return ctxt
+        return {}
+
+
+class PostgresqlDBContext(OSContextGenerator):
+    """Context for PostgreSQL access obtained over the pgsql-db interface."""
+    interfaces = ['pgsql-db']
+
+    def __init__(self, database=None):
+        # Database name; falls back to charm config('database') on call.
+        self.database = database
+
+    def __call__(self):
+        self.database = self.database or config('database')
+        if self.database is None:
+            log('Could not generate postgresql_db context. Missing required '
+                'charm config options. (database name)', level=ERROR)
+            raise OSContextError
+
+        ctxt = {}
+        for rid in relation_ids(self.interfaces[0]):
+            self.related = True
+            for unit in related_units(rid):
+                rel_host = relation_get('host', rid=rid, unit=unit)
+                rel_user = relation_get('user', rid=rid, unit=unit)
+                rel_passwd = relation_get('password', rid=rid, unit=unit)
+                ctxt = {'database_host': rel_host,
+                        'database': self.database,
+                        'database_user': rel_user,
+                        'database_password': rel_passwd,
+                        'database_type': 'postgresql'}
+                # First unit providing a complete data set wins.
+                if self.context_complete(ctxt):
+                    return ctxt
+
+        return {}
+
+
+def db_ssl(rdata, ctxt, ssl_dir):
+    """Write SSL material from db relation data into ssl_dir and record
+    the resulting paths in ctxt.
+
+    :param rdata: relation data dict (may carry base64 ssl_ca/ssl_cert/ssl_key)
+    :param ctxt: context dict to augment with database_ssl_* paths
+    :param ssl_dir: directory to write client SSL files to; if falsy,
+                    SSL data in the relation is ignored (with a log)
+    :returns: the (possibly augmented) ctxt
+    """
+    # NOTE(review): b64decode() returns bytes; writing them to a text-mode
+    # file raises TypeError on py3 -- presumably py2-only here, confirm.
+    if 'ssl_ca' in rdata and ssl_dir:
+        ca_path = os.path.join(ssl_dir, 'db-client.ca')
+        with open(ca_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_ca']))
+
+        ctxt['database_ssl_ca'] = ca_path
+    elif 'ssl_ca' in rdata:
+        log("Charm not setup for ssl support but ssl ca found", level=INFO)
+        return ctxt
+
+    if 'ssl_cert' in rdata:
+        cert_path = os.path.join(
+            ssl_dir, 'db-client.cert')
+        if not os.path.exists(cert_path):
+            # Allow time for a freshly issued client cert to become valid.
+            log("Waiting 1m for ssl client cert validity", level=INFO)
+            time.sleep(60)
+
+        with open(cert_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_cert']))
+
+        ctxt['database_ssl_cert'] = cert_path
+        key_path = os.path.join(ssl_dir, 'db-client.key')
+        with open(key_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_key']))
+
+        ctxt['database_ssl_key'] = key_path
+
+    return ctxt
+
+
+class IdentityServiceContext(OSContextGenerator):
+    """Context for keystone auth settings from the identity-service relation."""
+
+    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
+        self.service = service
+        self.service_user = service_user
+        self.rel_name = rel_name
+        self.interfaces = [self.rel_name]
+
+    def __call__(self):
+        log('Generating template context for ' + self.rel_name, level=DEBUG)
+        ctxt = {}
+
+        if self.service and self.service_user:
+            # This is required for pki token signing if we don't want /tmp to
+            # be used.
+            cachedir = '/var/cache/%s' % (self.service)
+            if not os.path.isdir(cachedir):
+                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
+                mkdir(path=cachedir, owner=self.service_user,
+                      group=self.service_user, perms=0o700)
+
+            ctxt['signing_dir'] = cachedir
+
+        for rid in relation_ids(self.rel_name):
+            self.related = True
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                serv_host = rdata.get('service_host')
+                serv_host = format_ipv6_addr(serv_host) or serv_host
+                auth_host = rdata.get('auth_host')
+                auth_host = format_ipv6_addr(auth_host) or auth_host
+                # Protocols and API version default when not in relation data.
+                svc_protocol = rdata.get('service_protocol') or 'http'
+                auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
+                ctxt.update({'service_port': rdata.get('service_port'),
+                             'service_host': serv_host,
+                             'auth_host': auth_host,
+                             'auth_port': rdata.get('auth_port'),
+                             'admin_tenant_name': rdata.get('service_tenant'),
+                             'admin_user': rdata.get('service_username'),
+                             'admin_password': rdata.get('service_password'),
+                             'service_protocol': svc_protocol,
+                             'auth_protocol': auth_protocol,
+                             'api_version': api_version})
+
+                if self.context_complete(ctxt):
+                    # NOTE(jamespage) this is required for >= icehouse
+                    # so a missing value just indicates keystone needs
+                    # upgrading
+                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+                    return ctxt
+
+        return {}
+
+
+class AMQPContext(OSContextGenerator):
+    """Context for rabbitmq access settings from the amqp relation."""
+
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+        # ssl_dir: where to write the rabbit client CA if provided;
+        # relation_prefix: prefix for rabbit-user/rabbit-vhost config keys.
+        self.ssl_dir = ssl_dir
+        self.rel_name = rel_name
+        self.relation_prefix = relation_prefix
+        self.interfaces = [rel_name]
+
+    def __call__(self):
+        log('Generating template context for amqp', level=DEBUG)
+        conf = config()
+        if self.relation_prefix:
+            user_setting = '%s-rabbit-user' % (self.relation_prefix)
+            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
+        else:
+            user_setting = 'rabbit-user'
+            vhost_setting = 'rabbit-vhost'
+
+        try:
+            username = conf[user_setting]
+            vhost = conf[vhost_setting]
+        except KeyError as e:
+            log('Could not generate shared_db context. Missing required charm '
+                'config options: %s.' % e, level=ERROR)
+            raise OSContextError
+
+        ctxt = {}
+        for rid in relation_ids(self.rel_name):
+            ha_vip_only = False
+            self.related = True
+            for unit in related_units(rid):
+                # Prefer the cluster VIP over the unit private address.
+                if relation_get('clustered', rid=rid, unit=unit):
+                    ctxt['clustered'] = True
+                    vip = relation_get('vip', rid=rid, unit=unit)
+                    vip = format_ipv6_addr(vip) or vip
+                    ctxt['rabbitmq_host'] = vip
+                else:
+                    host = relation_get('private-address', rid=rid, unit=unit)
+                    host = format_ipv6_addr(host) or host
+                    ctxt['rabbitmq_host'] = host
+
+                ctxt.update({
+                    'rabbitmq_user': username,
+                    'rabbitmq_password': relation_get('password', rid=rid,
+                                                      unit=unit),
+                    'rabbitmq_virtual_host': vhost,
+                })
+
+                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
+                if ssl_port:
+                    ctxt['rabbit_ssl_port'] = ssl_port
+
+                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
+                if ssl_ca:
+                    ctxt['rabbit_ssl_ca'] = ssl_ca
+
+                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
+                    ctxt['rabbitmq_ha_queues'] = True
+
+                ha_vip_only = relation_get('ha-vip-only',
+                                           rid=rid, unit=unit) is not None
+
+                if self.context_complete(ctxt):
+                    if 'rabbit_ssl_ca' in ctxt:
+                        if not self.ssl_dir:
+                            log("Charm not setup for ssl support but ssl ca "
+                                "found", level=INFO)
+                            break
+
+                        # NOTE(review): b64decode() returns bytes; text-mode
+                        # write fails on py3 -- confirm py2-only usage.
+                        ca_path = os.path.join(
+                            self.ssl_dir, 'rabbit-client-ca.pem')
+                        with open(ca_path, 'w') as fh:
+                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
+                        ctxt['rabbit_ssl_ca'] = ca_path
+
+                    # Sufficient information found = break out!
+                    break
+
+            # Used for active/active rabbitmq >= grizzly
+            if (('clustered' not in ctxt or ha_vip_only) and
+                    len(related_units(rid)) > 1):
+                rabbitmq_hosts = []
+                for unit in related_units(rid):
+                    host = relation_get('private-address', rid=rid, unit=unit)
+                    host = format_ipv6_addr(host) or host
+                    rabbitmq_hosts.append(host)
+
+                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
+
+        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
+        if oslo_messaging_flags:
+            ctxt['oslo_messaging_flags'] = config_flags_parser(
+                oslo_messaging_flags)
+
+        # self.complete is set by the last context_complete() call above.
+        if not self.complete:
+            return {}
+
+        return ctxt
+
+
+class CephContext(OSContextGenerator):
+    """Generates context for /etc/ceph/ceph.conf templates."""
+    interfaces = ['ceph']
+
+    def __call__(self):
+        if not relation_ids('ceph'):
+            return {}
+
+        log('Generating template context for ceph', level=DEBUG)
+        mon_hosts = []
+        ctxt = {
+            'use_syslog': str(config('use-syslog')).lower()
+        }
+        for rid in relation_ids('ceph'):
+            for unit in related_units(rid):
+                # auth/key are taken from the first unit that provides them.
+                if not ctxt.get('auth'):
+                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
+                if not ctxt.get('key'):
+                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
+                # Prefer the advertised ceph-public-address over the unit's
+                # private address.
+                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
+                                             unit=unit)
+                unit_priv_addr = relation_get('private-address', rid=rid,
+                                              unit=unit)
+                ceph_addr = ceph_pub_addr or unit_priv_addr
+                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
+                mon_hosts.append(ceph_addr)
+
+        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
+
+        if not os.path.isdir('/etc/ceph'):
+            os.mkdir('/etc/ceph')
+
+        if not self.context_complete(ctxt):
+            return {}
+
+        ensure_packages(['ceph-common'])
+        return ctxt
+
+
+class HAProxyContext(OSContextGenerator):
+    """Provides half a context for the haproxy template, which describes
+    all peers to be included in the cluster.  Each charm needs to include
+    its own context generator that describes the port mapping.
+    """
+    interfaces = ['cluster']
+
+    def __init__(self, singlenode_mode=False):
+        # singlenode_mode: render haproxy even with no peers.
+        self.singlenode_mode = singlenode_mode
+
+    def __call__(self):
+        if not relation_ids('cluster') and not self.singlenode_mode:
+            return {}
+
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+        else:
+            addr = get_host_ip(unit_get('private-address'))
+
+        l_unit = local_unit().replace('/', '-')
+        cluster_hosts = {}
+
+        # NOTE(jamespage): build out map of configured network endpoints
+        # and associated backends
+        for addr_type in ADDRESS_TYPES:
+            cfg_opt = 'os-{}-network'.format(addr_type)
+            laddr = get_address_in_network(config(cfg_opt))
+            if laddr:
+                netmask = get_netmask_for_address(laddr)
+                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
+                                                                  netmask),
+                                        'backends': {l_unit: laddr}}
+            # NOTE(review): this loop runs even when laddr is falsy;
+            # cluster_hosts[laddr] below then assumes peers also lack the
+            # per-type address -- confirm intended.
+            for rid in relation_ids('cluster'):
+                for unit in related_units(rid):
+                    _laddr = relation_get('{}-address'.format(addr_type),
+                                          rid=rid, unit=unit)
+                    if _laddr:
+                        _unit = unit.replace('/', '-')
+                        cluster_hosts[laddr]['backends'][_unit] = _laddr
+
+        # NOTE(jamespage) add backend based on private address - this
+        # will either be the only backend or the fallback if no acls
+        # match in the frontend
+        cluster_hosts[addr] = {}
+        netmask = get_netmask_for_address(addr)
+        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
+                               'backends': {l_unit: addr}}
+        for rid in relation_ids('cluster'):
+            for unit in related_units(rid):
+                _laddr = relation_get('private-address',
+                                      rid=rid, unit=unit)
+                if _laddr:
+                    _unit = unit.replace('/', '-')
+                    cluster_hosts[addr]['backends'][_unit] = _laddr
+
+        ctxt = {
+            'frontends': cluster_hosts,
+            'default_backend': addr
+        }
+
+        if config('haproxy-server-timeout'):
+            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
+
+        if config('haproxy-client-timeout'):
+            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
+
+        if config('haproxy-queue-timeout'):
+            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
+
+        if config('haproxy-connect-timeout'):
+            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
+
+        if config('prefer-ipv6'):
+            ctxt['ipv6'] = True
+            ctxt['local_host'] = 'ip6-localhost'
+            ctxt['haproxy_host'] = '::'
+        else:
+            ctxt['local_host'] = '127.0.0.1'
+            ctxt['haproxy_host'] = '0.0.0.0'
+
+        ctxt['stat_port'] = '8888'
+
+        # Persist the generated stats password in the charm's kv store so
+        # it survives across hook invocations.
+        db = kv()
+        ctxt['stat_password'] = db.get('stat-password')
+        if not ctxt['stat_password']:
+            ctxt['stat_password'] = db.set('stat-password',
+                                           pwgen(32))
+            db.flush()
+
+        for frontend in cluster_hosts:
+            if (len(cluster_hosts[frontend]['backends']) > 1 or
+                    self.singlenode_mode):
+                # Enable haproxy when we have enough peers.
+                log('Ensuring haproxy enabled in /etc/default/haproxy.',
+                    level=DEBUG)
+                with open('/etc/default/haproxy', 'w') as out:
+                    out.write('ENABLED=1\n')
+
+                return ctxt
+
+        log('HAProxy context is incomplete, this unit has no peers.',
+            level=INFO)
+        return {}
+
+
+class ImageServiceContext(OSContextGenerator):
+    """Context exposing the glance API server endpoint."""
+    interfaces = ['image-service']
+
+    def __call__(self):
+        """Obtains the glance API server from the image-service relation.
+        Useful in nova and cinder (currently).
+        """
+        log('Generating template context for image-service.', level=DEBUG)
+        rids = relation_ids('image-service')
+        if not rids:
+            return {}
+
+        # First unit advertising an api server wins.
+        for rid in rids:
+            for unit in related_units(rid):
+                api_server = relation_get('glance-api-server',
+                                          rid=rid, unit=unit)
+                if api_server:
+                    return {'glance_api_servers': api_server}
+
+        log("ImageService context is incomplete. Missing required relation "
+            "data.", level=INFO)
+        return {}
+
+
+class ApacheSSLContext(OSContextGenerator):
+ """Generates a context for an apache vhost configuration that configures
+ HTTPS reverse proxying for one or many endpoints. Generated context
+ looks something like::
+
+ {
+ 'namespace': 'cinder',
+ 'private_address': 'iscsi.mycinderhost.com',
+ 'endpoints': [(8776, 8766), (8777, 8767)]
+ }
+
+ The endpoints list consists of a tuples mapping external ports
+ to internal ports.
+ """
+ interfaces = ['https']
+
+ # charms should inherit this context and set external ports
+ # and service namespace accordingly.
+ external_ports = []
+ service_namespace = None
+
+ def enable_modules(self):
+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
+ check_call(cmd)
+
+ def configure_cert(self, cn=None):
+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
+ mkdir(path=ssl_dir)
+ cert, key = get_cert(cn)
+ if cn:
+ cert_filename = 'cert_{}'.format(cn)
+ key_filename = 'key_{}'.format(cn)
+ else:
+ cert_filename = 'cert'
+ key_filename = 'key'
+
+ write_file(path=os.path.join(ssl_dir, cert_filename),
+ content=b64decode(cert))
+ write_file(path=os.path.join(ssl_dir, key_filename),
+ content=b64decode(key))
+
+ def configure_ca(self):
+ ca_cert = get_ca_cert()
+ if ca_cert:
+ install_ca_cert(b64decode(ca_cert))
+
+ def canonical_names(self):
+ """Figure out which canonical names clients will access this service.
+ """
+ cns = []
+ for r_id in relation_ids('identity-service'):
+ for unit in related_units(r_id):
+ rdata = relation_get(rid=r_id, unit=unit)
+ for k in rdata:
+ if k.startswith('ssl_key_'):
+ cns.append(k.lstrip('ssl_key_'))
+
+ return sorted(list(set(cns)))
+
+ def get_network_addresses(self):
+ """For each network configured, return corresponding address and vip
+ (if available).
+
+ Returns a list of tuples of the form:
+
+ [(address_in_net_a, vip_in_net_a),
+ (address_in_net_b, vip_in_net_b),
+ ...]
+
+ or, if no vip(s) available:
+
+ [(address_in_net_a, address_in_net_a),
+ (address_in_net_b, address_in_net_b),
+ ...]
+ """
+ addresses = []
+ if config('vip'):
+ vips = config('vip').split()
+ else:
+ vips = []
+
+ for net_type in ['os-internal-network', 'os-admin-network',
+ 'os-public-network']:
+ addr = get_address_in_network(config(net_type),
+ unit_get('private-address'))
+ if len(vips) > 1 and is_clustered():
+ if not config(net_type):
+ log("Multiple networks configured but net_type "
+ "is None (%s)." % net_type, level=WARNING)
+ continue
+
+ for vip in vips:
+ if is_address_in_network(config(net_type), vip):
+ addresses.append((addr, vip))
+ break
+
+ elif is_clustered() and config('vip'):
+ addresses.append((addr, config('vip')))
+ else:
+ addresses.append((addr, addr))
+
+ return sorted(addresses)
+
+ def __call__(self):
+ if isinstance(self.external_ports, six.string_types):
+ self.external_ports = [self.external_ports]
+
+ if not self.external_ports or not https():
+ return {}
+
+ self.configure_ca()
+ self.enable_modules()
+
+ ctxt = {'namespace': self.service_namespace,
+ 'endpoints': [],
+ 'ext_ports': []}
+
+ cns = self.canonical_names()
+ if cns:
+ for cn in cns:
+ self.configure_cert(cn)
+ else:
+ # Expect cert/key provided in config (currently assumed that ca
+ # uses ip for cn)
+ cn = resolve_address(endpoint_type=INTERNAL)
+ self.configure_cert(cn)
+
+ addresses = self.get_network_addresses()
+ for address, endpoint in sorted(set(addresses)):
+ for api_port in self.external_ports:
+ ext_port = determine_apache_port(api_port,
+ singlenode_mode=True)
+ int_port = determine_api_port(api_port, singlenode_mode=True)
+ portmap = (address, endpoint, int(ext_port), int(int_port))
+ ctxt['endpoints'].append(portmap)
+ ctxt['ext_ports'].append(int(ext_port))
+
+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
+ return ctxt
+
+
+class NeutronContext(OSContextGenerator):
+ interfaces = []
+
+ @property
+ def plugin(self):
+ return None
+
+ @property
+ def network_manager(self):
+ return None
+
+ @property
+ def packages(self):
+ return neutron_plugin_attribute(self.plugin, 'packages',
+ self.network_manager)
+
+ @property
+ def neutron_security_groups(self):
+ return None
+
+ def _ensure_packages(self):
+ for pkgs in self.packages:
+ ensure_packages(pkgs)
+
+ def _save_flag_file(self):
+ if self.network_manager == 'quantum':
+ _file = '/etc/nova/quantum_plugin.conf'
+ else:
+ _file = '/etc/nova/neutron_plugin.conf'
+
+ with open(_file, 'wb') as out:
+ out.write(self.plugin + '\n')
+
+ def ovs_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ ovs_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'ovs',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return ovs_ctxt
+
+ def nuage_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ nuage_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'vsp',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return nuage_ctxt
+
+ def nvp_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ nvp_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'nvp',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return nvp_ctxt
+
+ def n1kv_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ n1kv_user_config_flags = config('n1kv-config-flags')
+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
+ n1kv_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'n1kv',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': n1kv_config,
+ 'vsm_ip': config('n1kv-vsm-ip'),
+ 'vsm_username': config('n1kv-vsm-username'),
+ 'vsm_password': config('n1kv-vsm-password'),
+ 'restrict_policy_profiles': restrict_policy_profiles}
+
+ if n1kv_user_config_flags:
+ flags = config_flags_parser(n1kv_user_config_flags)
+ n1kv_ctxt['user_config_flags'] = flags
+
+ return n1kv_ctxt
+
+ def calico_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ calico_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'Calico',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return calico_ctxt
+
+ def neutron_ctxt(self):
+ if https():
+ proto = 'https'
+ else:
+ proto = 'http'
+
+ if is_clustered():
+ host = config('vip')
+ else:
+ host = unit_get('private-address')
+
+ ctxt = {'network_manager': self.network_manager,
+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
+ return ctxt
+
+ def pg_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ ovs_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'plumgrid',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+ return ovs_ctxt
+
+ def midonet_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ midonet_config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ mido_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'midonet',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': midonet_config}
+
+ return mido_ctxt
+
+ def __call__(self):
+ if self.network_manager not in ['quantum', 'neutron']:
+ return {}
+
+ if not self.plugin:
+ return {}
+
+ ctxt = self.neutron_ctxt()
+
+ if self.plugin == 'ovs':
+ ctxt.update(self.ovs_ctxt())
+ elif self.plugin in ['nvp', 'nsx']:
+ ctxt.update(self.nvp_ctxt())
+ elif self.plugin == 'n1kv':
+ ctxt.update(self.n1kv_ctxt())
+ elif self.plugin == 'Calico':
+ ctxt.update(self.calico_ctxt())
+ elif self.plugin == 'vsp':
+ ctxt.update(self.nuage_ctxt())
+ elif self.plugin == 'plumgrid':
+ ctxt.update(self.pg_ctxt())
+ elif self.plugin == 'midonet':
+ ctxt.update(self.midonet_ctxt())
+
+ alchemy_flags = config('neutron-alchemy-flags')
+ if alchemy_flags:
+ flags = config_flags_parser(alchemy_flags)
+ ctxt['neutron_alchemy_flags'] = flags
+
+ self._save_flag_file()
+ return ctxt
+
+
+class NeutronPortContext(OSContextGenerator):
+    """Resolves configured data ports (MACs or NIC names) to usable NICs."""
+
+    def resolve_ports(self, ports):
+        """Resolve NICs not yet bound to bridge(s)
+
+        If hwaddress provided then returns resolved hwaddress otherwise NIC.
+        """
+        if not ports:
+            return None
+
+        hwaddr_to_nic = {}
+        hwaddr_to_ip = {}
+        for nic in list_nics():
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
+            hwaddr = get_nic_hwaddr(nic)
+            hwaddr_to_nic[hwaddr] = nic
+            addresses = get_ipv4_addr(nic, fatal=False)
+            addresses += get_ipv6_addr(iface=nic, fatal=False)
+            hwaddr_to_ip[hwaddr] = addresses
+
+        resolved = []
+        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
+        for entry in ports:
+            if re.match(mac_regex, entry):
+                # NIC is in known NICs and does NOT have an IP address
+                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
+                    # If the nic is part of a bridge then don't use it
+                    if is_bridge_member(hwaddr_to_nic[entry]):
+                        continue
+
+                    # Entry is a MAC address for a valid interface that doesn't
+                    # have an IP address assigned yet.
+                    resolved.append(hwaddr_to_nic[entry])
+            else:
+                # If the passed entry is not a MAC address, assume it's a valid
+                # interface, and that the user put it there on purpose (we can
+                # trust it to be the real external network).
+                resolved.append(entry)
+
+        # Ensure no duplicates
+        return list(set(resolved))
+
+
+class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some Openstack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
+    def __call__(self):
+        config_flags = config(self._charm_flag)
+        if not config_flags:
+            return {}
+
+        # Parsed with config_flags_parser (supports k=v lists and yaml).
+        return {self._template_flag:
+                config_flags_parser(config_flags)}
+
+
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            # Parsed with config_flags_parser (supports k=v lists and yaml).
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+ """
+ Responsible for inspecting relations to subordinates that
+ may be exporting required config via a json blob.
+
+ The subordinate interface allows subordinates to export their
+ configuration requirements to the principle for multiple config
+ files and multiple serivces. Ie, a subordinate that has interfaces
+ to both glance and nova may export to following yaml blob as json::
+
+ glance:
+ /etc/glance/glance-api.conf:
+ sections:
+ DEFAULT:
+ - [key1, value1]
+ /etc/glance/glance-registry.conf:
+ MYSECTION:
+ - [key2, value2]
+ nova:
+ /etc/nova/nova.conf:
+ sections:
+ DEFAULT:
+ - [key3, value3]
+
+
+ It is then up to the principle charms to subscribe this context to
+ the service+config file it is interestd in. Configuration data will
+ be available in the template context, in glance's case, as::
+
+ ctxt = {
+ ... other context ...
+ 'subordinate_configuration': {
+ 'DEFAULT': {
+ 'key1': 'value1',
+ },
+ 'MYSECTION': {
+ 'key2': 'value2',
+ },
+ }
+ }
+ """
+
+ def __init__(self, service, config_file, interface):
+ """
+ :param service : Service name key to query in any subordinate
+ data found
+ :param config_file : Service's config file to query sections
+ :param interface : Subordinate interface to inspect
+ """
+ self.config_file = config_file
+ if isinstance(service, list):
+ self.services = service
+ else:
+ self.services = [service]
+ if isinstance(interface, list):
+ self.interfaces = interface
+ else:
+ self.interfaces = [interface]
+
+ def __call__(self):
+ ctxt = {'sections': {}}
+ rids = []
+ for interface in self.interfaces:
+ rids.extend(relation_ids(interface))
+ for rid in rids:
+ for unit in related_units(rid):
+ sub_config = relation_get('subordinate_configuration',
+ rid=rid, unit=unit)
+ if sub_config and sub_config != '':
+ try:
+ sub_config = json.loads(sub_config)
+ except:
+ log('Could not parse JSON from '
+ 'subordinate_configuration setting from %s'
+ % rid, level=ERROR)
+ continue
+
+ for service in self.services:
+ if service not in sub_config:
+ log('Found subordinate_configuration on %s but it '
+ 'contained nothing for %s service'
+ % (rid, service), level=INFO)
+ continue
+
+ sub_config = sub_config[service]
+ if self.config_file not in sub_config:
+ log('Found subordinate_configuration on %s but it '
+ 'contained nothing for %s'
+ % (rid, self.config_file), level=INFO)
+ continue
+
+ sub_config = sub_config[self.config_file]
+ for k, v in six.iteritems(sub_config):
+ if k == 'sections':
+ for section, config_list in six.iteritems(v):
+ log("adding section '%s'" % (section),
+ level=DEBUG)
+ if ctxt[k].get(section):
+ ctxt[k][section].extend(config_list)
+ else:
+ ctxt[k][section] = config_list
+ else:
+ ctxt[k] = v
+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+ return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+    """Expose the 'debug' and 'verbose' charm config options, defaulting
+    each to False when the option is unset."""
+
+    def __call__(self):
+        ctxt = {}
+        # config() returns None for undefined options; coerce to False so
+        # templates always see a boolean.
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+    """Expose the 'use-syslog' charm config option to templates."""
+
+    def __call__(self):
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+    """Provide the wildcard listen address, honouring 'prefer-ipv6'."""
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            # IPv6 wildcard address
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+class WorkerConfigContext(OSContextGenerator):
+    """Compute the worker process count for a service:
+    cpu count * 'worker-multiplier' (0 when the option is unset)."""
+
+    @property
+    def num_cpus(self):
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
+
+    def __call__(self):
+        # An unset multiplier yields 0 workers.
+        multiplier = config('worker-multiplier') or 0
+        ctxt = {"workers": self.num_cpus * multiplier}
+        return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    """Collect ZeroMQ connection details from the zeromq-configuration
+    relation; the last related unit seen wins for each key."""
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+                    ctxt['zmq_redis_address'] = relation_get(
+                        'zmq_redis_address', unit, rid)
+
+        return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+    """Enable notifications when an amqp relation is present."""
+
+    def __init__(self, zmq_relation='zeromq-configuration',
+                 amqp_relation='amqp'):
+        """
+        :param zmq_relation: Name of Zeromq relation to check
+        :param amqp_relation: Name of amqp relation to check
+        """
+        self.zmq_relation = zmq_relation
+        self.amqp_relation = amqp_relation
+
+    def __call__(self):
+        # NOTE: values are the strings 'True'/'False' (not booleans) as
+        # rendered directly into config files by the templates.
+        ctxt = {'notifications': 'False'}
+        if is_relation_made(self.amqp_relation):
+            ctxt['notifications'] = "True"
+
+        return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+    """This context check if the 'sysctl' option exists on configuration
+    then creates a file with the loaded contents"""
+    def __call__(self):
+        sysctl_dict = config('sysctl')
+        if sysctl_dict:
+            # Persist the settings in a charm-specific sysctl.d snippet.
+            sysctl_create(sysctl_dict,
+                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+        return {'sysctl': sysctl_dict}
+
+
+class NeutronAPIContext(OSContextGenerator):
+    '''
+    Inspects current neutron-plugin-api relation for neutron settings. Return
+    defaults if it is not present.
+    '''
+    interfaces = ['neutron-plugin-api']
+
+    def __call__(self):
+        # Map of context key -> relation key and default used when the
+        # remote end has not provided a value.
+        self.neutron_defaults = {
+            'l2_population': {
+                'rel_key': 'l2-population',
+                'default': False,
+            },
+            'overlay_network_type': {
+                'rel_key': 'overlay-network-type',
+                'default': 'gre',
+            },
+            'neutron_security_groups': {
+                'rel_key': 'neutron-security-groups',
+                'default': False,
+            },
+            'network_device_mtu': {
+                'rel_key': 'network-device-mtu',
+                'default': None,
+            },
+            'enable_dvr': {
+                'rel_key': 'enable-dvr',
+                'default': False,
+            },
+            'enable_l3ha': {
+                'rel_key': 'enable-l3ha',
+                'default': False,
+            },
+        }
+        ctxt = self.get_neutron_options({})
+        for rid in relation_ids('neutron-plugin-api'):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                # 'l2-population' presence is used as the signal that the
+                # remote unit has published its settings.
+                if 'l2-population' in rdata:
+                    ctxt.update(self.get_neutron_options(rdata))
+
+        return ctxt
+
+    def get_neutron_options(self, rdata):
+        """Map relation data onto context keys, coercing string booleans
+        and falling back to defaults for missing keys."""
+        settings = {}
+        for nkey in self.neutron_defaults.keys():
+            defv = self.neutron_defaults[nkey]['default']
+            rkey = self.neutron_defaults[nkey]['rel_key']
+            if rkey in rdata.keys():
+                if type(defv) is bool:
+                    settings[nkey] = bool_from_string(rdata[rkey])
+                else:
+                    settings[nkey] = rdata[rkey]
+            else:
+                settings[nkey] = defv
+        return settings
+
+
+class ExternalPortContext(NeutronPortContext):
+    """Resolve the 'ext-port' config option to a single usable interface,
+    including the MTU advertised by neutron-plugin-api when available."""
+
+    def __call__(self):
+        ctxt = {}
+        ports = config('ext-port')
+        if ports:
+            ports = [p.strip() for p in ports.split()]
+            ports = self.resolve_ports(ports)
+            if ports:
+                # Only the first resolved port is used.
+                ctxt = {"ext_port": ports[0]}
+                napi_settings = NeutronAPIContext()()
+                mtu = napi_settings.get('network_device_mtu')
+                if mtu:
+                    ctxt['ext_port_mtu'] = mtu
+
+        return ctxt
+
+
+class DataPortContext(NeutronPortContext):
+    """Resolve the 'data-port' config option to a {device: bridge} map.
+
+    Ports may be given as interface names or MAC addresses; entries
+    already attached to a bridge are filtered out by resolve_ports.
+    """
+
+    def __call__(self):
+        ports = config('data-port')
+        if ports:
+            # Map of {port/mac:bridge}
+            portmap = parse_data_port_mappings(ports)
+            ports = portmap.keys()
+            # Resolve provided ports or mac addresses and filter out those
+            # already attached to a bridge.
+            resolved = self.resolve_ports(ports)
+            # FIXME: is this necessary?
+            # Map resolved device names back to the key used in portmap:
+            # MAC for entries given as MACs, the name itself otherwise.
+            normalized = {get_nic_hwaddr(port): port for port in resolved
+                          if port not in ports}
+            normalized.update({port: port for port in resolved
+                               if port in ports})
+            if resolved:
+                return {normalized[port]: bridge for port, bridge in
+                        six.iteritems(portmap) if port in normalized.keys()}
+
+        return None
+
+
+class PhyNICMTUContext(DataPortContext):
+    """Provide a device list and MTU for setting physical NIC MTUs,
+    derived from the resolved data-port mappings."""
+
+    def __call__(self):
+        ctxt = {}
+        mappings = super(PhyNICMTUContext, self).__call__()
+        if mappings and mappings.keys():
+            ports = sorted(mappings.keys())
+            napi_settings = NeutronAPIContext()()
+            mtu = napi_settings.get('network_device_mtu')
+            all_ports = set()
+            # If any of ports is a vlan device, its underlying device must have
+            # mtu applied first.
+            for port in ports:
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
+                    lport = os.path.basename(lport)
+                    all_ports.add(lport.split('_')[1])
+
+            all_ports = list(all_ports)
+            all_ports.extend(ports)
+            if mtu:
+                # Literal '\n' separator; expanded by the template consumer.
+                ctxt["devs"] = '\\n'.join(all_ports)
+                ctxt['mtu'] = mtu
+
+        return ctxt
+
+
+class NetworkServiceContext(OSContextGenerator):
+    """Collect keystone/quantum endpoint details from the
+    quantum-network-service relation (first complete unit wins)."""
+
+    def __init__(self, rel_name='quantum-network-service'):
+        self.rel_name = rel_name
+        self.interfaces = [rel_name]
+
+    def __call__(self):
+        for rid in relation_ids(self.rel_name):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                ctxt = {
+                    'keystone_host': rdata.get('keystone_host'),
+                    'service_port': rdata.get('service_port'),
+                    'auth_port': rdata.get('auth_port'),
+                    'service_tenant': rdata.get('service_tenant'),
+                    'service_username': rdata.get('service_username'),
+                    'service_password': rdata.get('service_password'),
+                    'quantum_host': rdata.get('quantum_host'),
+                    'quantum_port': rdata.get('quantum_port'),
+                    'quantum_url': rdata.get('quantum_url'),
+                    'region': rdata.get('region'),
+                    'service_protocol':
+                    rdata.get('service_protocol') or 'http',
+                    'auth_protocol':
+                    rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                    rdata.get('api_version') or '2.0',
+                }
+                # Only return once all required keys are populated.
+                if self.context_complete(ctxt):
+                    return ctxt
+        return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services e.g. between Nova and Cinder internally. Openstack uses Public
+    endpoints by default so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
+
+
+class AppArmorContext(OSContextGenerator):
+    """Base class for apparmor contexts."""
+
+    def __init__(self):
+        # Lazily-computed context cache; see the ctxt property.
+        self._ctxt = None
+        # Profile name; expected to be set by subclasses.
+        self.aa_profile = None
+        self.aa_utils_packages = ['apparmor-utils']
+
+    @property
+    def ctxt(self):
+        # Compute once and cache for subsequent accesses.
+        if self._ctxt is not None:
+            return self._ctxt
+        self._ctxt = self._determine_ctxt()
+        return self._ctxt
+
+    def _determine_ctxt(self):
+        """
+        Validate aa-profile-mode settings is disable, enforce, or complain.
+
+        :return ctxt: Dictionary of the apparmor profile or None
+        """
+        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
+            ctxt = {'aa-profile-mode': config('aa-profile-mode')}
+        else:
+            ctxt = None
+        return ctxt
+
+    def __call__(self):
+        return self.ctxt
+
+    def install_aa_utils(self):
+        """
+        Install packages required for apparmor configuration.
+        """
+        log("Installing apparmor utils.")
+        ensure_packages(self.aa_utils_packages)
+
+    def manually_disable_aa_profile(self):
+        """
+        Manually disable an apparmor profile.
+
+        If aa-profile-mode is set to disabled (default) this is required as the
+        template has been written but apparmor is yet unaware of the profile
+        and aa-disable aa-profile fails. Without this the profile would kick
+        into enforce mode on the next service restart.
+
+        """
+        profile_path = '/etc/apparmor.d'
+        disable_path = '/etc/apparmor.d/disable'
+        # lexists: don't re-create the symlink if one is already present,
+        # even a dangling one.
+        if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
+            os.symlink(os.path.join(profile_path, self.aa_profile),
+                       os.path.join(disable_path, self.aa_profile))
+
+    def setup_aa_profile(self):
+        """
+        Setup an apparmor profile.
+        The ctxt dictionary will contain the apparmor profile mode and
+        the apparmor profile name.
+        Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
+        the apparmor profile.
+
+        NOTE(review): _determine_ctxt() above only sets 'aa-profile-mode';
+        'aa-profile' read below is presumably added by subclasses — confirm.
+        """
+        self()
+        if not self.ctxt:
+            log("Not enabling apparmor Profile")
+            return
+        self.install_aa_utils()
+        cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
+        cmd.append(self.ctxt['aa-profile'])
+        log("Setting up the apparmor profile for {} in {} mode."
+            "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
+        try:
+            check_call(cmd)
+        except CalledProcessError as e:
+            # If aa-profile-mode is set to disabled (default) manual
+            # disabling is required as the template has been written but
+            # apparmor is yet unaware of the profile and aa-disable aa-profile
+            # fails. If aa-disable learns to read profile files first this can
+            # be removed.
+            if self.ctxt['aa-profile-mode'] == 'disable':
+                log("Manually disabling the apparmor profile for {}."
+                    "".format(self.ctxt['aa-profile']))
+                self.manually_disable_aa_profile()
+                return
+            status_set('blocked', "Apparmor profile {} failed to be set to {}."
+                                  "".format(self.ctxt['aa-profile'],
+                                            self.ctxt['aa-profile-mode']))
+            raise e
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 0000000..7587679
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
new file mode 100755
index 0000000..0df0717
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+CRITICAL=0
+NOTACTIVE=''
+LOGFILE=/var/log/nagios/check_haproxy.log
+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
+
+typeset -i N_INSTANCES=0
+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
+do
+ N_INSTANCES=N_INSTANCES+1
+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
+ if [ $? != 0 ]; then
+ date >> $LOGFILE
+ echo $output >> $LOGFILE
+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
+ CRITICAL=1
+ NOTACTIVE="${NOTACTIVE} $appserver"
+ fi
+done
+
+if [ $CRITICAL = 1 ]; then
+ echo "CRITICAL:${NOTACTIVE}"
+ exit 2
+fi
+
+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
+exit 0
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
new file mode 100755
index 0000000..3ebb532
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+# These should be config options at some stage
+CURRQthrsh=0
+MAXQthrsh=100
+
+# Stats credentials: first "stats auth" line found under /etc/haproxy.
+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+
+HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
+
+# CSV fields: 3 = current queue depth (qcur), 4 = max observed (qmax).
+for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
+do
+    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
+    MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
+
+    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
+        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
+        exit 2
+    fi
+done
+
+echo "OK: All haproxy queue depths looking good"
+exit 0
+
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
new file mode 100644
index 0000000..532a1dc
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
@@ -0,0 +1,179 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+
+from charmhelpers.core.hookenv import (
+ config,
+ unit_get,
+ service_name,
+ network_get_primary_address,
+)
+from charmhelpers.contrib.network.ip import (
+ get_address_in_network,
+ is_address_in_network,
+ is_ipv6,
+ get_ipv6_addr,
+ resolve_network_cidr,
+)
+from charmhelpers.contrib.hahelpers.cluster import is_clustered
+
+PUBLIC = 'public'
+INTERNAL = 'int'
+ADMIN = 'admin'
+
+ADDRESS_MAP = {
+ PUBLIC: {
+ 'binding': 'public',
+ 'config': 'os-public-network',
+ 'fallback': 'public-address',
+ 'override': 'os-public-hostname',
+ },
+ INTERNAL: {
+ 'binding': 'internal',
+ 'config': 'os-internal-network',
+ 'fallback': 'private-address',
+ 'override': 'os-internal-hostname',
+ },
+ ADMIN: {
+ 'binding': 'admin',
+ 'config': 'os-admin-network',
+ 'fallback': 'private-address',
+ 'override': 'os-admin-hostname',
+ }
+}
+
+
+def canonical_url(configs, endpoint_type=PUBLIC):
+    """Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration, hacluster and charm configuration.
+
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :param endpoint_type: str endpoint type to resolve.
+    :returns: str base URL for services on the current service unit.
+    """
+    scheme = _get_scheme(configs)
+
+    address = resolve_address(endpoint_type)
+    # IPv6 literals must be bracketed when embedded in a URL (RFC 2732).
+    if is_ipv6(address):
+        address = "[{}]".format(address)
+
+    return '%s://%s' % (scheme, address)
+
+
+def _get_scheme(configs):
+    """Returns the scheme to use for the url (either http or https)
+    depending upon whether https is in the configs value.
+
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :returns: either 'http' or 'https' depending on whether https is
+              configured within the configs context.
+    """
+    scheme = 'http'
+    if configs and 'https' in configs.complete_contexts():
+        scheme = 'https'
+    return scheme
+
+
+def _get_address_override(endpoint_type=PUBLIC):
+    """Returns any address overrides that the user has defined based on the
+    endpoint type.
+
+    Note: this function allows for the service name to be inserted into the
+    address if the user specifies {service_name}.somehost.org.
+
+    :param endpoint_type: the type of endpoint to retrieve the override
+                          value for.
+    :returns: any endpoint address or hostname that the user has overridden
+              or None if an override is not present.
+    """
+    override_key = ADDRESS_MAP[endpoint_type]['override']
+    addr_override = config(override_key)
+    if not addr_override:
+        return None
+    else:
+        # Expand an optional {service_name} placeholder in the override.
+        return addr_override.format(service_name=service_name())
+
+
+def resolve_address(endpoint_type=PUBLIC):
+    """Return unit address depending on net config.
+
+    If unit is clustered with vip(s) and has net splits defined, return vip on
+    correct network. If clustered with no nets defined, return primary vip.
+
+    If not clustered, return unit address ensuring address is on configured net
+    split if one is configured, or a Juju 2.0 extra-binding has been used.
+
+    :param endpoint_type: Network endpoint type
+    :raises ValueError: if no suitable address can be determined.
+    """
+    # A user-configured hostname/address override always wins.
+    resolved_address = _get_address_override(endpoint_type)
+    if resolved_address:
+        return resolved_address
+
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    binding = ADDRESS_MAP[endpoint_type]['binding']
+    clustered = is_clustered()
+
+    if clustered and vips:
+        if net_addr:
+            # Pick the vip that falls inside the configured net split.
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
+                    resolved_address = vip
+                    break
+        else:
+            # NOTE: endeavour to check vips against network space
+            #       bindings
+            try:
+                bound_cidr = resolve_network_cidr(
+                    network_get_primary_address(binding)
+                )
+                for vip in vips:
+                    if is_address_in_network(bound_cidr, vip):
+                        resolved_address = vip
+                        break
+            except NotImplementedError:
+                # If no net-splits configured and no support for extra
+                # bindings/network spaces so we expect a single vip
+                resolved_address = vips[0]
+    else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
+        else:
+            fallback_addr = unit_get(net_fallback)
+
+        if net_addr:
+            resolved_address = get_address_in_network(net_addr, fallback_addr)
+        else:
+            # NOTE: only try to use extra bindings if legacy network
+            #       configuration is not in use
+            try:
+                resolved_address = network_get_primary_address(binding)
+            except NotImplementedError:
+                resolved_address = fallback_addr
+
+    if resolved_address is None:
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. (net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
new file mode 100644
index 0000000..d057ea6
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
@@ -0,0 +1,384 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Various utilities for dealing with Neutron and the renaming from Quantum.
+
+import six
+from subprocess import check_output
+
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ ERROR,
+)
+
+from charmhelpers.contrib.openstack.utils import os_release
+
+
+def headers_package():
+    """Ensures correct linux-headers for running kernel are installed,
+    for building DKMS package"""
+    # uname -r gives the running kernel release, e.g. '3.13.0-24-generic'.
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+    return 'linux-headers-%s' % kver
+
+QUANTUM_CONF_DIR = '/etc/quantum'
+
+
+def kernel_version():
+    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+    # Only the first two dotted components (major, minor) are used.
+    kver = kver.split('.')
+    return (int(kver[0]), int(kver[1]))
+
+
+def determine_dkms_package():
+    """ Determine which DKMS package should be used based on kernel version """
+    # NOTE: 3.13 kernels have support for GRE and VXLAN native
+    if kernel_version() >= (3, 13):
+        return []
+    else:
+        return [headers_package(), 'openvswitch-datapath-dkms']
+
+
+# legacy
+
+
+def quantum_plugins():
+    """Return a map of quantum (pre-rename) plugin metadata keyed by
+    plugin name: config path, driver, contexts, services and packages.
+    """
+    # Imported here to avoid a circular import at module load time.
+    from charmhelpers.contrib.openstack import context
+    return {
+        'ovs': {
+            'config': '/etc/quantum/plugins/openvswitch/'
+                      'ovs_quantum_plugin.ini',
+            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
+                      'OVSQuantumPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
+            'services': ['quantum-plugin-openvswitch-agent'],
+            'packages': [determine_dkms_package(),
+                         ['quantum-plugin-openvswitch-agent']],
+            'server_packages': ['quantum-server',
+                                'quantum-plugin-openvswitch'],
+            'server_services': ['quantum-server']
+        },
+        'nvp': {
+            'config': '/etc/quantum/plugins/nicira/nvp.ini',
+            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
+                      'QuantumPlugin.NvpPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
+            'services': [],
+            'packages': [],
+            'server_packages': ['quantum-server',
+                                'quantum-plugin-nicira'],
+            'server_services': ['quantum-server']
+        }
+    }
+
+NEUTRON_CONF_DIR = '/etc/neutron'
+
+
+def neutron_plugins():
+    """Return a map of neutron plugin metadata keyed by plugin name:
+    config path, driver, contexts, services and packages. Entries are
+    patched per OpenStack release at the bottom of this function.
+    """
+    # Imported here to avoid a circular import at module load time.
+    from charmhelpers.contrib.openstack import context
+    release = os_release('nova-common')
+    plugins = {
+        'ovs': {
+            'config': '/etc/neutron/plugins/openvswitch/'
+                      'ovs_neutron_plugin.ini',
+            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
+                      'OVSNeutronPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': ['neutron-plugin-openvswitch-agent'],
+            'packages': [determine_dkms_package(),
+                         ['neutron-plugin-openvswitch-agent']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-openvswitch'],
+            'server_services': ['neutron-server']
+        },
+        'nvp': {
+            'config': '/etc/neutron/plugins/nicira/nvp.ini',
+            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
+                      'NeutronPlugin.NvpPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-nicira'],
+            'server_services': ['neutron-server']
+        },
+        'nsx': {
+            'config': '/etc/neutron/plugins/vmware/nsx.ini',
+            'driver': 'vmware',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-vmware'],
+            'server_services': ['neutron-server']
+        },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [determine_dkms_package(),
+                         ['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
+        },
+        'Calico': {
+            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
+            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': ['calico-felix',
+                         'bird',
+                         'neutron-dhcp-agent',
+                         'nova-api-metadata',
+                         'etcd'],
+            'packages': [determine_dkms_package(),
+                         ['calico-compute',
+                          'bird',
+                          'neutron-dhcp-agent',
+                          'nova-api-metadata',
+                          'etcd']],
+            'server_packages': ['neutron-server', 'calico-control', 'etcd'],
+            'server_services': ['neutron-server', 'etcd']
+        },
+        'vsp': {
+            'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
+            'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [],
+            'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
+            'server_services': ['neutron-server']
+        },
+        'plumgrid': {
+            'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
+            'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
+            'contexts': [
+                context.SharedDBContext(user=config('database-user'),
+                                        database=config('database'),
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': ['plumgrid-lxc',
+                         'iovisor-dkms'],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-plumgrid'],
+            'server_services': ['neutron-server']
+        },
+        'midonet': {
+            'config': '/etc/neutron/plugins/midonet/midonet.ini',
+            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [determine_dkms_package()],
+            'server_packages': ['neutron-server',
+                                'python-neutron-plugin-midonet'],
+            'server_services': ['neutron-server']
+        }
+    }
+    if release >= 'icehouse':
+        # NOTE: patch in ml2 plugin for icehouse onwards
+        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+        plugins['ovs']['server_packages'] = ['neutron-server',
+                                             'neutron-plugin-ml2']
+        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
+        plugins['nvp'] = plugins['nsx']
+    if release >= 'kilo':
+        plugins['midonet']['driver'] = (
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
+    if release >= 'liberty':
+        plugins['midonet']['driver'] = (
+            'midonet.neutron.plugin_v1.MidonetPluginV2')
+        plugins['midonet']['server_packages'].remove(
+            'python-neutron-plugin-midonet')
+        plugins['midonet']['server_packages'].append(
+            'python-networking-midonet')
+        plugins['plumgrid']['driver'] = (
+            'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
+        plugins['plumgrid']['server_packages'].remove(
+            'neutron-plugin-plumgrid')
+    return plugins
+
+
+def neutron_plugin_attribute(plugin, attr, net_manager=None):
+    """Return the requested attribute for the given network plugin.
+
+    :param plugin: plugin key, e.g. 'ovs'.
+    :param attr: attribute name, e.g. 'packages'.
+    :param net_manager: 'quantum' or 'neutron'; detected when omitted.
+    :returns: the attribute value, or None when the plugin lacks it.
+    :raises Exception: on unknown manager or unrecognised plugin.
+    """
+    manager = net_manager or network_manager()
+    if manager == 'quantum':
+        plugins = quantum_plugins()
+    elif manager == 'neutron':
+        plugins = neutron_plugins()
+    else:
+        log("Network manager '%s' does not support plugins." % (manager),
+            level=ERROR)
+        raise Exception
+
+    try:
+        _plugin = plugins[plugin]
+    except KeyError:
+        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
+        raise Exception
+
+    try:
+        return _plugin[attr]
+    except KeyError:
+        return None
+
+
+def network_manager():
+    '''
+    Deals with the renaming of Quantum to Neutron in H and any situations
+    that require compatibility (eg, deploying H with network-manager=quantum,
+    upgrading from G).
+    '''
+    release = os_release('nova-common')
+    manager = config('network-manager').lower()
+
+    if manager not in ['quantum', 'neutron']:
+        return manager
+
+    if release in ['essex']:
+        # E does not support neutron
+        log('Neutron networking not supported in Essex.', level=ERROR)
+        raise Exception
+    elif release in ['folsom', 'grizzly']:
+        # neutron is named quantum in F and G
+        return 'quantum'
+    else:
+        # ensure accurate naming for all releases post-H
+        return 'neutron'
+
+
+def parse_mappings(mappings, key_rvalue=False):
+    """By default mappings are lvalue keyed.
+
+    If key_rvalue is True, the mapping will be reversed to allow multiple
+    configs for the same lvalue.
+
+    :param mappings: space-delimited string of lvalue:rvalue pairs.
+    :param key_rvalue: key the result on the rvalue instead of the lvalue.
+    :returns: dict of parsed mappings (later entries overwrite earlier
+              ones with the same key).
+    """
+    parsed = {}
+    if mappings:
+        mappings = mappings.split()
+        for m in mappings:
+            p = m.partition(':')
+
+            if key_rvalue:
+                key_index = 2
+                val_index = 0
+                # if there is no rvalue skip to next
+                if not p[1]:
+                    continue
+            else:
+                key_index = 0
+                val_index = 2
+
+            key = p[key_index].strip()
+            parsed[key] = p[val_index].strip()
+
+    return parsed
+
+
+def parse_bridge_mappings(mappings):
+    """Parse bridge mappings.
+
+    Mappings must be a space-delimited list of provider:bridge mappings.
+
+    :param mappings: space-delimited string of provider:bridge pairs.
+    :returns: dict of the form {provider:bridge}.
+    """
+    return parse_mappings(mappings)
+
+
+def parse_data_port_mappings(mappings, default_bridge='br-data'):
+ """Parse data port mappings.
+
+ Mappings must be a space-delimited list of bridge:port.
+
+ Returns dict of the form {port:bridge} where ports may be mac addresses or
+ interface names.
+ """
+
+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
+ # proposed for <port> since it may be a mac address which will differ
+ # across units this allowing first-known-good to be chosen.
+ _mappings = parse_mappings(mappings, key_rvalue=True)
+ if not _mappings or list(_mappings.values()) == ['']:
+ if not mappings:
+ return {}
+
+ # For backwards-compatibility we need to support port-only provided in
+ # config.
+ _mappings = {mappings.split()[0]: default_bridge}
+
+ ports = _mappings.keys()
+ if len(set(ports)) != len(ports):
+ raise Exception("It is not allowed to have the same port configured "
+ "on more than one bridge")
+
+ return _mappings
+
+
+def parse_vlan_range_mappings(mappings):
+    """Parse vlan range mappings.
+
+    Mappings must be a space-delimited list of provider:start:end mappings.
+
+    The start:end range is optional and may be omitted.
+
+    :param mappings: space-delimited string of provider[:start:end] entries.
+    :returns: dict of the form {provider: (start, end)}.
+    """
+    _mappings = parse_mappings(mappings)
+    if not _mappings:
+        return {}
+
+    mappings = {}
+    for p, r in six.iteritems(_mappings):
+        # r is 'start:end' (or '' when the range was omitted).
+        mappings[p] = tuple(r.split(':'))
+
+    return mappings
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
new file mode 100644
index 0000000..7587679
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
new file mode 100644
index 0000000..33ceee2
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -0,0 +1,21 @@
+###############################################################################
+# [ WARNING ]
+# ceph configuration file maintained by Juju
+# local changes may be overwritten.
+###############################################################################
+[global]
+{% if auth -%}
+auth_supported = {{ auth }}
+keyring = /etc/ceph/$cluster.$name.keyring
+mon host = {{ mon_hosts }}
+{% endif -%}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
+
+[client]
+{% if rbd_client_cache_settings -%}
+{% for key, value in rbd_client_cache_settings.iteritems() -%}
+{{ key }} = {{ value }}
+{% endfor -%}
+{%- endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
new file mode 100644
index 0000000..4bed404
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
@@ -0,0 +1,17 @@
+description "{{ service_description }}"
+author "Juju {{ service_name }} Charm <juju@localhost>"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+exec start-stop-daemon --start --chuid {{ user_name }} \
+ --chdir {{ start_dir }} --name {{ process_name }} \
+ --exec {{ executable_name }} -- \
+ {% for config_file in config_files -%}
+ --config-file={{ config_file }} \
+ {% endfor -%}
+ {% if log_file -%}
+ --log-file={{ log_file }}
+ {% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
new file mode 100644
index 0000000..32b6276
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -0,0 +1,66 @@
+global
+ log {{ local_host }} local0
+ log {{ local_host }} local1 notice
+ maxconn 20000
+ user haproxy
+ group haproxy
+ spread-checks 0
+
+defaults
+ log global
+ mode tcp
+ option tcplog
+ option dontlognull
+ retries 3
+{%- if haproxy_queue_timeout %}
+ timeout queue {{ haproxy_queue_timeout }}
+{%- else %}
+ timeout queue 5000
+{%- endif %}
+{%- if haproxy_connect_timeout %}
+ timeout connect {{ haproxy_connect_timeout }}
+{%- else %}
+ timeout connect 5000
+{%- endif %}
+{%- if haproxy_client_timeout %}
+ timeout client {{ haproxy_client_timeout }}
+{%- else %}
+ timeout client 30000
+{%- endif %}
+{%- if haproxy_server_timeout %}
+ timeout server {{ haproxy_server_timeout }}
+{%- else %}
+ timeout server 30000
+{%- endif %}
+
+listen stats
+ bind {{ local_host }}:{{ stat_port }}
+ mode http
+ stats enable
+ stats hide-version
+ stats realm Haproxy\ Statistics
+ stats uri /
+ stats auth admin:{{ stat_password }}
+
+{% if frontends -%}
+{% for service, ports in service_ports.items() -%}
+frontend tcp-in_{{ service }}
+ bind *:{{ ports[0] }}
+ {% if ipv6 -%}
+ bind :::{{ ports[0] }}
+ {% endif -%}
+ {% for frontend in frontends -%}
+ acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
+ use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
+ {% endfor -%}
+ default_backend {{ service }}_{{ default_backend }}
+
+{% for frontend in frontends -%}
+backend {{ service }}_{{ frontend }}
+ balance leastconn
+ {% for unit, address in frontends[frontend]['backends'].items() -%}
+ server {{ unit }} {{ address }}:{{ ports[1] }} check
+ {% endfor %}
+{% endfor -%}
+{% endfor -%}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 0000000..6a92380
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,26 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+ ServerName {{ endpoint }}
+ SSLEngine on
+ SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+ SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+ ProxyPass / http://localhost:{{ int }}/
+ ProxyPassReverse / http://localhost:{{ int }}/
+ ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+ Order deny,allow
+ Allow from all
+</Proxy>
+<Location />
+ Order allow,deny
+ Allow from all
+</Location>
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
new file mode 100644
index 0000000..6a92380
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
@@ -0,0 +1,26 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+ ServerName {{ endpoint }}
+ SSLEngine on
+ SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+ SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+ ProxyPass / http://localhost:{{ int }}/
+ ProxyPassReverse / http://localhost:{{ int }}/
+ ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+ Order deny,allow
+ Allow from all
+</Proxy>
+<Location />
+ Order allow,deny
+ Allow from all
+</Location>
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
new file mode 100644
index 0000000..5dcebe7
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -0,0 +1,12 @@
+{% if auth_host -%}
+[keystone_authtoken]
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+auth_plugin = password
+project_domain_id = default
+user_domain_id = default
+project_name = {{ admin_tenant_name }}
+username = {{ admin_user }}
+password = {{ admin_password }}
+signing_dir = {{ signing_dir }}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
new file mode 100644
index 0000000..9356b2b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
@@ -0,0 +1,10 @@
+{% if auth_host -%}
+[keystone_authtoken]
+# Juno specific config (Bug #1557223)
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
+identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+admin_tenant_name = {{ admin_tenant_name }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+signing_dir = {{ signing_dir }}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
new file mode 100644
index 0000000..dd6f364
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
@@ -0,0 +1,12 @@
+{% if auth_host -%}
+[keystone_authtoken]
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_name = {{ admin_tenant_name }}
+username = {{ admin_user }}
+password = {{ admin_password }}
+signing_dir = {{ signing_dir }}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
new file mode 100644
index 0000000..b444c9c
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
@@ -0,0 +1,22 @@
+{% if rabbitmq_host or rabbitmq_hosts -%}
+[oslo_messaging_rabbit]
+rabbit_userid = {{ rabbitmq_user }}
+rabbit_virtual_host = {{ rabbitmq_virtual_host }}
+rabbit_password = {{ rabbitmq_password }}
+{% if rabbitmq_hosts -%}
+rabbit_hosts = {{ rabbitmq_hosts }}
+{% if rabbitmq_ha_queues -%}
+rabbit_ha_queues = True
+rabbit_durable_queues = False
+{% endif -%}
+{% else -%}
+rabbit_host = {{ rabbitmq_host }}
+{% endif -%}
+{% if rabbit_ssl_port -%}
+rabbit_use_ssl = True
+rabbit_port = {{ rabbit_ssl_port }}
+{% if rabbit_ssl_ca -%}
+kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
+{% endif -%}
+{% endif -%}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
new file mode 100644
index 0000000..95f1a76
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
@@ -0,0 +1,14 @@
+{% if zmq_host -%}
+# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
+rpc_backend = zmq
+rpc_zmq_host = {{ zmq_host }}
+{% if zmq_redis_address -%}
+rpc_zmq_matchmaker = redis
+matchmaker_heartbeat_freq = 15
+matchmaker_heartbeat_ttl = 30
+[matchmaker_redis]
+host = {{ zmq_redis_address }}
+{% else -%}
+rpc_zmq_matchmaker = ring
+{% endif -%}
+{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 0000000..e5e3cb1
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,323 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ log,
+ ERROR,
+ INFO
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-jinja2', fatal=True)
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+    """Raised when a templates directory is missing or a config file is
+    not registered with the renderer."""
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If directory template directory
+    is missing at templates_dir, it will be omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    # Insert release-specific dirs at the front so newer releases take
+    # precedence; stop once the requested release is reached.
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=INFO)
+    return ChoiceLoader(loaders)
+
+
class OSConfigTemplate(object):
    """Bind a config file to the context generators that feed its template.

    Each generator is a callable exposing an ``interfaces`` attribute;
    calling it yields a dict that is merged into the overall template
    context for the config file.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        # Normalise a single callable into a one-element list.
        if callable(contexts):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        """Invoke every registered generator and merge their results.

        Interfaces belonging to generators that returned data are recorded
        as complete.
        """
        merged = {}
        for generator in self.contexts:
            data = generator()
            if not data:
                continue
            merged.update(data)
            # Remember which interfaces yielded a usable (complete) context.
            for interface in generator.interfaces:
                if interface not in self._complete_contexts:
                    self._complete_contexts.append(interface)
        return merged

    def complete_contexts(self):
        """Return the list of interfaces that have satisfied contexts."""
        if not self._complete_contexts:
            self.context()
        return self._complete_contexts
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms. It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='folsom')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release. This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution. Doing so may require inspecting service relations, charm
+    config, etc. When registered, a config file is associated with a list
+    of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template. See context.py for more info.
+    """
+    def __init__(self, templates_dir, openstack_release):
+        if not os.path.isdir(templates_dir):
+            log('Could not locate templates dir %s' % templates_dir,
+                level=ERROR)
+            raise OSConfigException
+
+        self.templates_dir = templates_dir
+        self.openstack_release = openstack_release
+        self.templates = {}
+        self._tmpl_env = None
+
+        if None in [Environment, ChoiceLoader, FileSystemLoader]:
+            # if this code is running, the object is created pre-install hook.
+            # jinja2 shouldn't get touched until the module is reloaded on next
+            # hook execution, with proper jinja2 bits successfully imported.
+            apt_install('python-jinja2')
+
+    def register(self, config_file, contexts):
+        """
+        Register a config file with a list of context generators to be called
+        during rendering.
+        """
+        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
+                                                       contexts=contexts)
+        log('Registered config file: %s' % config_file, level=INFO)
+
+    def _get_tmpl_env(self):
+        # Lazily build the jinja2 Environment so the loader reflects the
+        # currently selected openstack_release (see set_release()).
+        if not self._tmpl_env:
+            loader = get_loader(self.templates_dir, self.openstack_release)
+            self._tmpl_env = Environment(loader=loader)
+
+    def _get_template(self, template):
+        # Resolve a template name through the release-aware ChoiceLoader.
+        self._get_tmpl_env()
+        template = self._tmpl_env.get_template(template)
+        log('Loaded template from %s' % template.filename, level=INFO)
+        return template
+
+    def render(self, config_file):
+        # Render a registered config file to a string; raises
+        # OSConfigException if the file was never register()-ed.
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+        ctxt = self.templates[config_file].context()
+
+        _tmpl = os.path.basename(config_file)
+        try:
+            template = self._get_template(_tmpl)
+        except exceptions.TemplateNotFound:
+            # if no template is found with basename, try looking for it
+            # using a munged full path, eg:
+            # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
+            _tmpl = '_'.join(config_file.split('/')[1:])
+            try:
+                template = self._get_template(_tmpl)
+            except exceptions.TemplateNotFound as e:
+                log('Could not load template from %s by %s or %s.' %
+                    (self.templates_dir, os.path.basename(config_file), _tmpl),
+                    level=ERROR)
+                raise e
+
+        log('Rendering from template: %s' % _tmpl, level=INFO)
+        return template.render(ctxt)
+
+    def write(self, config_file):
+        """
+        Write a single config file, raises if config file is not registered.
+        """
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+
+        _out = self.render(config_file)
+
+        # NOTE(review): file opened in binary mode but render() returns
+        # text; fine on python2, would need encoding on python3 — confirm.
+        with open(config_file, 'wb') as out:
+            out.write(_out)
+
+        log('Wrote template %s.' % config_file, level=INFO)
+
+    def write_all(self):
+        """
+        Write out all registered config files.
+        """
+        [self.write(k) for k in six.iterkeys(self.templates)]
+
+    def set_release(self, openstack_release):
+        """
+        Resets the template environment and generates a new template loader
+        based on the new openstack release.
+        """
+        self._tmpl_env = None
+        self.openstack_release = openstack_release
+        self._get_tmpl_env()
+
+    def complete_contexts(self):
+        '''
+        Returns a list of context interfaces that yield a complete context.
+        '''
+        interfaces = []
+        [interfaces.extend(i.complete_contexts())
+         for i in six.itervalues(self.templates)]
+        return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+        {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+         'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                        if related:
+                            if incomplete_context_data.get(interface):
+                                incomplete_context_data[interface].update({'related': True})
+                            else:
+                                incomplete_context_data[interface] = {'related': True}
+                        else:
+                            incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
new file mode 100644
index 0000000..115cc4b
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
@@ -0,0 +1,1576 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict
+from functools import wraps
+
+import subprocess
+import json
+import os
+import sys
+import re
+import itertools
+import functools
+
+import six
+import tempfile
+import traceback
+import uuid
+import yaml
+
+from charmhelpers.contrib.network import ip
+
+from charmhelpers.core import (
+ unitdata,
+)
+
+from charmhelpers.core.hookenv import (
+ action_fail,
+ action_set,
+ config,
+ log as juju_log,
+ charm_dir,
+ DEBUG,
+ INFO,
+ related_units,
+ relation_ids,
+ relation_set,
+ status_set,
+ hook_name
+)
+
+from charmhelpers.contrib.storage.linux.lvm import (
+ deactivate_lvm_volume_group,
+ is_lvm_physical_volume,
+ remove_lvm_physical_volume,
+)
+
+from charmhelpers.contrib.network.ip import (
+ get_ipv6_addr,
+ is_ipv6,
+ port_has_listener,
+)
+
+from charmhelpers.contrib.python.packages import (
+ pip_create_virtualenv,
+ pip_install,
+)
+
+from charmhelpers.core.host import (
+ lsb_release,
+ mounts,
+ umount,
+ service_running,
+ service_pause,
+ service_resume,
+ restart_on_change_helper,
+)
+from charmhelpers.fetch import apt_install, apt_cache, install_remote
+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
+
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+
+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
+                   'restricted main multiverse universe')
+
+# Ubuntu series -> OpenStack release shipped in that series' archive.
+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
+    ('oneiric', 'diablo'),
+    ('precise', 'essex'),
+    ('quantal', 'folsom'),
+    ('raring', 'grizzly'),
+    ('saucy', 'havana'),
+    ('trusty', 'icehouse'),
+    ('utopic', 'juno'),
+    ('vivid', 'kilo'),
+    ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
+])
+
+
+# Coordinated (pre-Liberty) OpenStack version number -> release codename.
+OPENSTACK_CODENAMES = OrderedDict([
+    ('2011.2', 'diablo'),
+    ('2012.1', 'essex'),
+    ('2012.2', 'folsom'),
+    ('2013.1', 'grizzly'),
+    ('2013.2', 'havana'),
+    ('2014.1', 'icehouse'),
+    ('2014.2', 'juno'),
+    ('2015.1', 'kilo'),
+    ('2015.2', 'liberty'),
+    ('2016.1', 'mitaka'),
+])
+
+# The ugly duckling - must list releases oldest to newest
+SWIFT_CODENAMES = OrderedDict([
+    ('diablo',
+     ['1.4.3']),
+    ('essex',
+     ['1.4.8']),
+    ('folsom',
+     ['1.7.4']),
+    ('grizzly',
+     ['1.7.6', '1.7.7', '1.8.0']),
+    ('havana',
+     ['1.9.0', '1.9.1', '1.10.0']),
+    ('icehouse',
+     ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+    ('juno',
+     ['2.0.0', '2.1.0', '2.2.0']),
+    ('kilo',
+     ['2.2.1', '2.2.2']),
+    ('liberty',
+     ['2.3.0', '2.4.0', '2.5.0']),
+    ('mitaka',
+     ['2.5.0', '2.6.0', '2.7.0']),
+])
+
+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+    'nova-common': OrderedDict([
+        ('12.0', 'liberty'),
+        ('13.0', 'mitaka'),
+    ]),
+    'neutron-common': OrderedDict([
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
+        ('8.1', 'mitaka'),
+    ]),
+    'cinder-common': OrderedDict([
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
+    ]),
+    'keystone': OrderedDict([
+        ('8.0', 'liberty'),
+        ('8.1', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+    'horizon-common': OrderedDict([
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+    'ceilometer-common': OrderedDict([
+        ('5.0', 'liberty'),
+        ('6.1', 'mitaka'),
+    ]),
+    'heat-common': OrderedDict([
+        ('5.0', 'liberty'),
+        ('6.0', 'mitaka'),
+    ]),
+    'glance-common': OrderedDict([
+        ('11.0', 'liberty'),
+        ('12.0', 'mitaka'),
+    ]),
+    'openstack-dashboard': OrderedDict([
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+}
+
+# presumably the default size for loopback-backed storage devices created
+# via ensure_loopback_device() — confirm against callers.
+DEFAULT_LOOPBACK_SIZE = '5G'
+
+
+def error_out(msg):
+    """Log a fatal error via juju and exit the process with status 1."""
+    juju_log("FATAL ERROR: %s" % msg, level='ERROR')
+    sys.exit(1)
+
+
+def get_os_codename_install_source(src):
+    '''Derive OpenStack release codename from a given installation source.
+
+    :param src: install source, e.g. 'distro', 'cloud:trusty-mitaka',
+        a 'deb ...' line or a 'ppa:...' spec; may be None.
+    '''
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+    rel = ''
+    if src is None:
+        return rel
+    if src in ['distro', 'distro-proposed']:
+        try:
+            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
+        except KeyError:
+            e = 'Could not derive openstack release for '\
+                'this Ubuntu release: %s' % ubuntu_rel
+            error_out(e)
+        return rel
+
+    if src.startswith('cloud:'):
+        ca_rel = src.split(':')[1]
+        # e.g. 'trusty-mitaka/updates' -> 'mitaka'
+        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        return ca_rel
+
+    # Best guess match based on deb string provided
+    if src.startswith('deb') or src.startswith('ppa'):
+        for k, v in six.iteritems(OPENSTACK_CODENAMES):
+            if v in src:
+                return v
+    # NOTE(review): implicitly returns None when no codename can be
+    # matched from a deb/ppa source string.
+
+
+def get_os_version_install_source(src):
+    '''Derive OpenStack version number from an installation source.'''
+    codename = get_os_codename_install_source(src)
+    return get_os_version_codename(codename)
+
+
+def get_os_codename_version(vers):
+    '''Determine OpenStack codename from version number.
+
+    :param vers: coordinated version string, e.g. '2015.1'.
+    '''
+    try:
+        return OPENSTACK_CODENAMES[vers]
+    except KeyError:
+        e = 'Could not determine OpenStack codename for version %s' % vers
+        error_out(e)
+
+
+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
+    '''Determine OpenStack version number from codename.
+
+    :param codename: release codename, e.g. 'mitaka'.
+    :param version_map: {version: codename} mapping to search.
+    '''
+    for k, v in six.iteritems(version_map):
+        if v == codename:
+            return k
+    e = 'Could not derive OpenStack version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_os_version_codename_swift(codename):
+    '''Determine OpenStack version number of swift from codename.
+
+    Returns the newest (last-listed) swift version for the codename.
+    '''
+    for k, v in six.iteritems(SWIFT_CODENAMES):
+        if k == codename:
+            return v[-1]
+    e = 'Could not derive swift version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
def get_swift_codename(version):
    '''Determine OpenStack codename that corresponds to swift version.

    :param version: swift package version string, e.g. '2.5.0'.
    :returns: release codename, or None if the version is unknown.
    '''
    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
    if len(codenames) > 1:
        # If more than one release codename contains this version we determine
        # the actual codename based on the highest available install source.
        for codename in reversed(codenames):
            releases = UBUNTU_OPENSTACK_RELEASE
            release = [k for k, v in six.iteritems(releases) if codename in v]
            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
            # BUG FIX: check_output() returns bytes on python3; decode so
            # the 'in' substring checks below don't raise TypeError.
            if six.PY3:
                ret = ret.decode('UTF-8')
            # BUG FIX: guard release[0] — 'release' is empty when no Ubuntu
            # series maps to this codename, which previously raised
            # IndexError instead of falling through to the next candidate.
            if codename in ret or (release and release[0] in ret):
                return codename
    elif len(codenames) == 1:
        return codenames[0]
    return None
+
+
+def get_os_codename_package(package, fatal=True):
+    '''Derive OpenStack release codename from an installed package.
+
+    :param package: package name to inspect, e.g. 'nova-common'.
+    :param fatal: when True, unknown/uninstalled packages abort the hook;
+        when False, None is returned instead.
+    '''
+    import apt_pkg as apt
+
+    cache = apt_cache()
+
+    try:
+        pkg = cache[package]
+    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
+    # 'except Exception:' (or KeyError) would be safer here.
+    except:
+        if not fatal:
+            return None
+        # the package is unknown to the current apt cache.
+        e = 'Could not determine version of package with no installation '\
+            'candidate: %s' % package
+        error_out(e)
+
+    if not pkg.current_ver:
+        if not fatal:
+            return None
+        # package is known, but no version is currently installed.
+        e = 'Could not determine version of uninstalled package: %s' % package
+        error_out(e)
+
+    vers = apt.upstream_version(pkg.current_ver.ver_str)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match('^(\d+)\.(\d+)', vers)
+
+    if match:
+        vers = match.group(0)
+
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                return get_swift_codename(vers)
+            else:
+                return OPENSTACK_CODENAMES[vers]
+        except KeyError:
+            if not fatal:
+                return None
+            e = 'Could not determine OpenStack codename for version %s' % vers
+            error_out(e)
+
+
+def get_os_version_package(pkg, fatal=True):
+    '''Derive OpenStack version number from an installed package.
+
+    :param pkg: installed package name.
+    :param fatal: passed through to get_os_codename_package().
+    :returns: version string, or None if the codename cannot be mapped.
+    '''
+    codename = get_os_codename_package(pkg, fatal=fatal)
+
+    if not codename:
+        return None
+
+    if 'swift' in pkg:
+        vers_map = SWIFT_CODENAMES
+        # swift maps codename -> list of versions; return the newest.
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
+    else:
+        vers_map = OPENSTACK_CODENAMES
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
+    # e = "Could not determine OpenStack version for package: %s" % pkg
+    # error_out(e)
+
+
+# Module-level cache for the resolved OpenStack release codename.
+os_rel = None
+
+
+def os_release(package, base='essex'):
+    '''
+    Returns OpenStack release codename from a cached global.
+    If the codename can not be determined from either an installed package or
+    the installation source, the earliest release supported by the charm should
+    be returned.
+
+    :param package: package whose installed version hints at the release.
+    :param base: fallback codename when nothing can be derived.
+    '''
+    global os_rel
+    if os_rel:
+        return os_rel
+    # Resolution order: installed package, then configured install source,
+    # then the charm's earliest supported release.
+    os_rel = (get_os_codename_package(package, fatal=False) or
+              get_os_codename_install_source(config('openstack-origin')) or
+              base)
+    return os_rel
+
+
+def import_key(keyid):
+    """Import a PGP key into apt's trusted keyring.
+
+    :param keyid: either a full ASCII-armored key block or a key ID to be
+        fetched from the Ubuntu keyserver.
+    """
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
+        # Write the armored key to a temp file so apt-key can read it.
+        with tempfile.NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error_out("Error importing PGP key '%s'" % key)
+    else:
+        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        juju_log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error_out("Error importing PGP key '%s'" % key)
+
+
def get_source_and_pgp_key(input):
    """Look for a pgp key ID or ascii-armor key in the given input.

    The key, if any, follows the last '|' in the source spec.

    :param input: str: source specification, optionally '<source>|<key>'.
    :returns: (source, key) tuple; key is None when no '|' is present.
    """
    # NOTE: the original assigned `index = input.strip()` here and then
    # immediately overwrote it -- dead code removed; behaviour unchanged.
    index = input.rfind('|')
    if index < 0:
        return input, None

    key = input[index + 1:].strip('|')
    source = input[:index]
    return source, key
+
+
def configure_installation_source(rel):
    '''Configure apt installation source.

    Writes sources.list.d entries and/or adds PPAs for the requested
    release spec; calls error_out() on an unrecognised spec.
    '''
    if rel == 'distro':
        # Stock Ubuntu archive: nothing to configure.
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        # 'ppa:...' optionally suffixed with '|<pgp key>'.
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        # Raw 'deb ...' line, optionally suffixed with '|<pgp key>'.
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        # Ubuntu Cloud Archive pocket, e.g. 'cloud:trusty-mitaka'.
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            # NOTE(review): this local `os_rel` shadows the module-level
            # os_release() cache; it is only used to build the PPA name.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
            'juno': 'trusty-updates/juno',
            'juno/updates': 'trusty-updates/juno',
            'juno/proposed': 'trusty-proposed/juno',
            'kilo': 'trusty-updates/kilo',
            'kilo/updates': 'trusty-updates/kilo',
            'kilo/proposed': 'trusty-proposed/kilo',
            'liberty': 'trusty-updates/liberty',
            'liberty/updates': 'trusty-updates/liberty',
            'liberty/proposed': 'trusty-proposed/liberty',
            'mitaka': 'trusty-updates/mitaka',
            'mitaka/updates': 'trusty-updates/mitaka',
            'mitaka/proposed': 'trusty-proposed/mitaka',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # The cloud archive is signed by the key shipped in this package.
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
+
+
def config_value_changed(option):
    """Return True when the config option differs from its last-seen value.

    The current value is persisted in the unit's kv store on every call, so
    the first call for an option always reports False (nothing to compare).
    """
    hook_data = unitdata.HookData()
    with hook_data():
        store = unitdata.kv()
        current = config(option)
        previous = store.get(option)
        store.set(option, current)
        # No baseline recorded yet -> treat as unchanged.
        return previous is not None and current != previous
+
+
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.

    :param script_path: path of the rc file relative to the charm directory.
    :param env_vars: name=value pairs to export; 'script_path' is reserved.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    # Open in text mode: the writes below are str objects, which would raise
    # TypeError against the original binary-mode ('wb') file on Python 3.
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write("#!/bin/bash\n")
        for name, value in six.iteritems(env_vars):
            # 'script_path' is a keyword of this function, not an env var.
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
+
+
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.

    :returns: bool: Returns True if configured installation source offers
                    a newer version of package.
    """

    # Imported lazily: apt_pkg is only present on the deployed unit.
    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if "swift" in package:
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    apt.init()
    if "swift" in package:
        # Swift may only jump within the same or the next major version:
        # apt's version_compare returns 1 when the first argument is newer,
        # 0 when equal.
        major_cur_vers = cur_vers.split('.', 1)[0]
        major_avail_vers = avail_vers.split('.', 1)[0]
        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
    return apt.version_compare(avail_vers, cur_vers) == 1
+
+
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    if block_device in ('None', 'none', None):
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        # Already a device node path.
        bdev = block_device
    elif block_device.startswith('/'):
        # A plain path: optionally '<path>|<size>' for a loopback device.
        parts = block_device.split('|')
        if len(parts) == 2:
            bdev, size = parts
        else:
            bdev, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        # Bare device name, e.g. 'sdb'.
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
+
+
def clean_storage(block_device):
    '''
    Ensures a block device is clean. That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount anywhere the device is currently mounted.
    for mount_point, device in mounts():
        if device == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (device, mount_point), level=INFO)
            umount(mount_point, persist=True)

    # Tear down LVM if present, otherwise just wipe the partition table.
    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
+
# Re-export address helpers from the ip module so existing callers can keep
# importing them from this module (backwards compatibility).
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
+
+
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    """Load the oslo matchmaker ring file.

    :param mm_file: path to the JSON ring file.
    :returns: parsed dict, or {} when the file does not exist.
    """
    if not os.path.isfile(mm_file):
        return {}
    with open(mm_file, 'r') as handle:
        return json.load(handle)
+
+
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Publish database credentials plus all local IPv6 addresses (and any
    IPv6 VIPs) on every 'shared-db' relation.

    :param database: database name to request.
    :param database_user: database username to request.
    :param relation_prefix: optional prefix applied to every setting key.
    """
    hosts = get_ipv6_addr(dynamic_only=False)

    vip_config = config('vip')
    if vip_config:
        for vip in vip_config.split():
            if vip and is_ipv6(vip):
                hosts.append(vip)

    settings = {'database': database,
                'username': database_user,
                'hostname': json.dumps(hosts)}

    if relation_prefix:
        settings = {"%s_%s" % (relation_prefix, key): value
                    for key, value in settings.items()}

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **settings)
+
+
def os_requires_version(ostack_release, pkg):
    """
    Decorator for hook to specify minimum supported release.

    Raises at call time when os_release(pkg) sorts before ostack_release.
    """
    def decorator(func):
        @wraps(func)
        def inner(*args):
            release = os_release(pkg)
            if release < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            func(*args)
        return inner
    return decorator
+
+
def git_install_requested():
    """
    Returns true if openstack-origin-git is specified.
    """
    origin_git = config('openstack-origin-git')
    return origin_git is not None
+
+
# Set by git_clone_and_install() once the 'requirements' repository has been
# cloned; read by _git_clone_and_install_single() when syncing requirements.
requirements_dir = None


def _git_yaml_load(projects_yaml):
    """
    Load the specified yaml into a dictionary.

    :returns: parsed structure, or None when projects_yaml is empty/None.
    """
    if not projects_yaml:
        return None

    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects. projects_yaml comes from charm config
    # (operator-controlled), but yaml.safe_load would be the safer choice.
    return yaml.load(projects_yaml)
+
+
def git_clone_and_install(projects_yaml, core_project):
    """
    Clone/install all specified OpenStack repositories.

    The expected format of projects_yaml is:

        repositories:
          - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
             branch: 'stable/icehouse'}
          - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}

        directory: /mnt/openstack-git
        http_proxy: squid-proxy-url
        https_proxy: squid-proxy-url

    The directory, http_proxy, and https_proxy keys are optional.

    :param projects_yaml: yaml string describing the repositories (see above).
    :param core_project: name of the project that must be listed last.
    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'
    http_proxy = None

    projects = _git_yaml_load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    # Snapshot the environment so any proxy settings can be undone at the end.
    old_environ = dict(os.environ)

    if 'http_proxy' in projects.keys():
        http_proxy = projects['http_proxy']
        os.environ['http_proxy'] = projects['http_proxy']
    if 'https_proxy' in projects.keys():
        os.environ['https_proxy'] = projects['https_proxy']

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))

    # Upgrade setuptools and pip from default virtualenv versions. The default
    # versions in trusty break master OpenStack branch deployments.
    for p in ['pip', 'setuptools']:
        pip_install(p, upgrade=True, proxy=http_proxy,
                    venv=os.path.join(parent_dir, 'venv'))

    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        depth = '1'
        if 'depth' in p.keys():
            depth = p['depth']
        if p['name'] == 'requirements':
            # 'requirements' is validated to be first, so later repos can
            # sync against it; remember where it was cloned.
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=False)
            requirements_dir = repo_dir
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=True)

    # Restore the pre-call environment (drops the proxy vars set above).
    os.environ = old_environ
+
+
def _git_validate_projects_yaml(projects, core_project):
    """
    Validate the projects yaml.

    Ensures every repository entry is complete, 'requirements' is listed
    first, and core_project is listed last; error_out() otherwise.
    """
    _git_ensure_key_exists('repositories', projects)

    for project in projects['repositories']:
        for required_key in ('name', 'repository', 'branch'):
            _git_ensure_key_exists(required_key, project.keys())

    repositories = projects['repositories']
    if repositories[0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))

    if repositories[-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))
+
+
+def _git_ensure_key_exists(key, keys):
+ """
+ Ensure that key exists in keys.
+ """
+ if key not in keys:
+ error_out('openstack-origin-git key \'{}\' is missing'.format(key))
+
+
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
                                  update_requirements):
    """
    Clone and install a single git repository.

    :param repo: git URL of the repository.
    :param branch: branch to check out.
    :param depth: shallow-clone depth passed through to install_remote().
    :param parent_dir: directory to clone beneath; created when absent.
    :param http_proxy: optional proxy URL for the pip install.
    :param update_requirements: when True, sync the repo's requirements from
        the previously-cloned global 'requirements' repo (requirements_dir).
    :returns: path of the cloned repository.
    """
    if not os.path.exists(parent_dir):
        # BUGFIX: the previous message claimed the directory already existed,
        # which is the opposite of what this branch means.
        juju_log('Directory does not exist at {}. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
    repo_dir = install_remote(
        repo, dest=parent_dir, branch=branch, depth=depth)

    venv = os.path.join(parent_dir, 'venv')

    if update_requirements:
        # requirements_dir is set by git_clone_and_install() after cloning
        # the 'requirements' repo, which must come first.
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(venv, repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    if http_proxy:
        pip_install(repo_dir, proxy=http_proxy, venv=venv)
    else:
        pip_install(repo_dir, venv=venv)

    return repo_dir
+
+
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.

    :param venv: virtualenv whose python runs the update script.
    :param package_dir: repository directory to update.
    :param reqs_dir: checkout of the global 'requirements' repository.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    finally:
        # BUGFIX: restore the working directory even if error_out() raises,
        # so a failure here cannot leak a changed cwd to the caller.
        os.chdir(orig_dir)
+
+
def git_pip_venv_dir(projects_yaml):
    """
    Return the pip virtualenv path.

    Uses the 'directory' key from projects_yaml when present, otherwise
    the default /mnt/openstack-git.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')
    return os.path.join(parent_dir, 'venv')
+
+
def git_src_dir(projects_yaml, project):
    """
    Return the directory where the specified project's source is located,
    or None when the project is not listed in projects_yaml.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')

    for repo in projects['repositories']:
        if repo['name'] == project:
            return os.path.join(parent_dir,
                                os.path.basename(repo['repository']))

    return None
+
+
def git_yaml_value(projects_yaml, key):
    """
    Return the value in projects_yaml for the specified key,
    or None when the key is absent.
    """
    projects = _git_yaml_load(projects_yaml)
    return projects.get(key)
+
+
def os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Decorator to set workload status based on complete contexts.

    Runs the wrapped hook first, then recomputes and publishes the unit's
    workload status via set_os_workload_status().
    """
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            func(*args, **kwargs)
            # Contexts have now been acted on; refresh the status.
            set_os_workload_status(configs, required_interfaces, charm_func)
        return inner
    return decorator
+
+
def set_os_workload_status(configs, required_interfaces, charm_func=None,
                           services=None, ports=None):
    """Set the state of the workload status for the charm.

    This calls _determine_os_workload_status() to get the new state, message
    and sets the status using status_set(). Nothing is returned; the status
    is published to juju as a side effect.

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    """
    state, message = _determine_os_workload_status(
        configs, required_interfaces, charm_func, services, ports)
    status_set(state, message)
+
+
def _determine_os_workload_status(
        configs, required_interfaces, charm_func=None,
        services=None, ports=None):
    """Determine the state of the workload status for the charm.

    This function returns the new workload status for the charm based
    on the state of the interfaces, the paused state and whether the
    services are actually running and any specified ports are open.

    This checks:

     1. if the unit should be paused, that it is actually paused.  If so the
        state is 'maintenance' + message, else 'broken'.
     2. that the interfaces/relations are complete.  If they are not then
        it sets the state to either 'broken' or 'waiting' and an appropriate
        message.
     3. If all the relation data is set, then it checks that the actual
        services really are running.  If not it sets the state to 'broken'.

    If everything is okay then the state returns 'active'.

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    @returns state, message: the new workload status, user message
    """
    # Each _ows_check_* helper returns (None, None) when it has nothing to
    # report; the first non-None state wins and suppresses later checks.
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    if state != 'maintenance' and charm_func:
        # _ows_check_charm_func() may modify the state, message
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state = 'active'
        message = "Unit is ready"
        juju_log(message, 'INFO')

    return state, message
+
+
def _ows_check_if_paused(services=None, ports=None):
    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    if the unit isn't supposed to be paused, just return None, None

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if not is_unit_paused_set():
        # Not meant to be paused: nothing to report.
        return None, None

    state, message = check_actually_paused(services=services, ports=ports)
    if state is not None:
        # Something is still running/listening: propagate the problem.
        return state, message

    # Everything really is stopped; report a clean paused state.
    return ("maintenance",
            "Paused. Use 'resume' action to resume normal service.")
+
+
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Check the complete contexts to determine the workload status.

     - Checks for missing or incomplete contexts
     - juju log details of missing required data.
     - determines the correct workload status
     - creates an appropriate message for status_set(...)

    if there are no problems then the function returns None, None

    @param configs: a templating.OSConfigRenderer() object
    @params required_interfaces: {generic_interface: [specific_interface], }
    @returns state, message or None, None
    """
    incomplete_rel_data = incomplete_relation_data(configs,
                                                   required_interfaces)
    state = None
    message = None
    missing_relations = set()
    incomplete_relations = set()

    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        # No relation ID for the generic_interface?
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case - relation ID exists but departing
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            # 'waiting' never overrides an earlier 'blocked' verdict.
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)

    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+ """Run a custom check function for the charm to see if it wants to
+ change the state. This is only run if not in 'maintenance' and
+ tests to see if the new state is more important that the previous
+ one determined by the interfaces/relations check.
+
+ @param state: the previously determined state so far.
+ @param message: the user orientated message so far.
+ @param charm_func: a callable function that returns state, message
+ @returns state, message strings.
+ """
+ if charm_func_with_configs:
+ charm_state, charm_message = charm_func_with_configs()
+ if charm_state != 'active' and charm_state != 'unknown':
+ state = workload_state_compare(state, charm_state)
+ if message:
+ charm_message = charm_message.replace("Incomplete relations: ",
+ "")
+ message = "{}, {}".format(message, charm_message)
+ else:
+ message = charm_message
+ return state, message
+
+
def _ows_check_services_running(services, ports):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    Returns (None, None) when everything checks out.

    @param services: list of strings OR dictionary specifying services/ports
    @param ports: list of ports
    @returns state, message: strings or None, None
    """
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}"
                .format(", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}"
                .format(message))
            state = 'blocked'

    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
                .format(", ".join([str(p) for p, v in ports_open
                                   if not v])))
            state = 'blocked'

    # Any failure above set state; fold the messages into one report.
    if state is not None:
        message = "; ".join(messages)
        return state, message

    return None, None
+
+
+def _extract_services_list_helper(services):
+ """Extract a OrderedDict of {service: [ports]} of the supplied services
+ for use by the other functions.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param services: see above
+ @returns OrderedDict(service: [ports], ...)
+ """
+ if services is None:
+ return {}
+ if isinstance(services, dict):
+ services = services.values()
+ # either extract the list of services from the dictionary, or if
+ # it is a simple string, use that. i.e. works with mixed lists.
+ _s = OrderedDict()
+ for s in services:
+ if isinstance(s, dict) and 'service' in s:
+ _s[s['service']] = s.get('ports', [])
+ if isinstance(s, str):
+ _s[s] = []
+ return _s
+
+
def _check_running_services(services):
    """Check that the services dict provided is actually running and provide
    a list of (service, boolean) tuples for each service.

    Returns both a zipped list of (service, boolean) and a list of booleans
    in the same order as the services.

    @param services: OrderedDict of strings: [ports], one for each service to
                     check.
    @returns [(service, boolean), ...], : results for checks
             [boolean]                  : just the result of the service checks
    """
    results = []
    for service in services:
        results.append(service_running(service))
    return list(zip(services, results)), results
+
+
def _check_listening_on_services_ports(services, test=False):
    """Check that the unit is actually listening (has the port open) on the
    ports that the service specifies are open. If test is True then the
    function returns the services with ports that are open rather than
    closed.

    Returns an OrderedDict of service: ports and a list of booleans

    @param services: OrderedDict(service: [port, ...], ...)
    @param test: default=False, if False, test for closed, otherwise open.
    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
    """
    test = bool(test)
    all_ports = list(itertools.chain(*services.values()))
    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
    # Ports whose open/closed state matches what we are looking for.
    matched = {port for port, opened in zip(all_ports, ports_states)
               if opened == test}
    map_ports = OrderedDict()
    for service, ports in services.items():
        hits = set(ports).intersection(matched)
        if hits:
            map_ports[service] = hits
    return map_ports, ports_states
+
+
def _check_listening_on_ports_list(ports):
    """Check that the ports list given are being listened to

    Returns a list of ports being listened to and a list of the
    booleans.

    @param ports: LIST or port numbers.
    @returns [(port_num, boolean), ...], [boolean]
    """
    states = [port_has_listener('0.0.0.0', port) for port in ports]
    # NOTE: kept as a lazy zip (not materialised) to match callers'
    # expectations of the original return value.
    return zip(ports, states), states
+
+
+def _filter_tuples(services_states, state):
+ """Return a simple list from a list of tuples according to the condition
+
+ @param services_states: LIST of (string, boolean): service and running
+ state.
+ @param state: Boolean to match the tuple against.
+ @returns [LIST of strings] that matched the tuple RHS.
+ """
+ return [s for s, b in services_states if b == state]
+
+
def workload_state_compare(current_workload_state, workload_state):
    """Return whichever of the two workload states has the higher priority.

    Unrecognised states are treated as 'unknown' (lowest priority).
    """
    priority = {'unknown': -1,
                'active': 0,
                'maintenance': 1,
                'waiting': 2,
                'blocked': 3}

    current = (current_workload_state
               if current_workload_state in priority else 'unknown')
    proposed = workload_state if workload_state in priority else 'unknown'

    # Keep the current state only when it strictly outranks the proposal.
    if priority[current] > priority[proposed]:
        return current
    return proposed
+
+
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces
    Return dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces.
    Example:
    required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    The interface is said to be satisfied if anyone of the interfaces in the
    list has a complete context.

    Return dictionary of incomplete or missing required contexts with relation
    status of interfaces and any missing data points. Example:
        {'message':
             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
              'zeromq-configuration': {'related': False}},
         'identity':
             {'identity-service': {'related': False}},
         'database':
             {'pgsql-db': {'related': False},
              'shared-db': {'related': True}}}
    """
    complete = configs.complete_contexts()
    missing = {}
    for svc_type, interfaces in required_interfaces.items():
        # Satisfied when any one of the candidate interfaces is complete.
        if not set(interfaces).intersection(complete):
            missing[svc_type] = configs.get_incomplete_context_data(interfaces)
    return missing
+
+
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                # BUGFIX: catch Exception rather than a bare except so that
                # SystemExit/KeyboardInterrupt still propagate instead of
                # being reported as an upgrade traceback.
                except Exception:
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
+
+
def remote_restart(rel_name, remote_service=None):
    """Trigger a restart on related units by publishing a fresh UUID.

    :param rel_name: relation name to publish the trigger on.
    :param remote_service: optional service name the remote side should
        restart, sent alongside the trigger.
    """
    trigger = {
        'restart-trigger': str(uuid.uuid4()),
    }
    if remote_service:
        trigger['remote-service'] = remote_service
    for rid in relation_ids(rel_name):
        # This subordinate can be related to two separate services using
        # different subordinate relations so only issue the restart if
        # the principal is connected down the relation we think it is
        if related_units(relid=rid):
            relation_set(relation_id=rid,
                         relation_settings=trigger,
                         )
+
+
def check_actually_paused(services=None, ports=None):
    """Check that services listed in the services object and ports
    are actually closed (not listened to), to verify that the unit is
    properly paused.

    @param services: See _extract_services_list_helper
    @param ports: OPTIONAL list of port numbers that must not be listened to.
    @returns status, : string for status (None if okay)
             message : string for problem for status_set
    """
    state = None
    message = None
    messages = []
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, services_states = _check_running_services(services)
        if any(services_states):
            # there shouldn't be any running so this is a problem
            messages.append("these services running: {}"
                            .format(", ".join(
                                _filter_tuples(services_running, True))))
            state = "blocked"
        # test=True: report the service ports that ARE open (should be none).
        ports_open, ports_open_bools = (
            _check_listening_on_services_ports(services, True))
        if any(ports_open_bools):
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in ports_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "these service:ports are open: {}".format(message))
            state = 'blocked'
    if ports is not None:
        ports_open, bools = _check_listening_on_ports_list(ports)
        if any(bools):
            messages.append(
                "these ports which should be closed, but are open: {}"
                .format(", ".join([str(p) for p, v in ports_open if v])))
            state = 'blocked'
    if messages:
        message = ("Services should be paused but {}"
                   .format(", ".join(messages)))
    return state, message
+
+
+def set_unit_paused():
+ """Set the unit to a paused state in the local kv() store.
+ This does NOT actually pause the unit
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-paused', True)
+
+
+def clear_unit_paused():
+ """Clear the unit from a paused state in the local kv() store
+ This does NOT actually restart any services - it only clears the
+ local state.
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-paused', False)
+
+
+def is_unit_paused_set():
+ """Return the state of the kv().get('unit-paused').
+ This does NOT verify that the unit really is paused.
+
+ To help with units that don't have HookData() (testing)
+ if it excepts, return False
+ """
+ try:
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ # transform something truth-y into a Boolean.
+ return not(not(kv.get('unit-paused')))
+ except:
+ return False
+
+
+def pause_unit(assess_status_func, services=None, ports=None,
+ charm_func=None):
+ """Pause a unit by stopping the services and setting 'unit-paused'
+ in the local kv() store.
+
+ Also checks that the services have stopped and ports are no longer
+ being listened to.
+
+ An optional charm_func() can be called that can either raise an
+ Exception or return non None, None to indicate that the unit
+ didn't pause cleanly.
+
+ The signature for charm_func is:
+ charm_func() -> message: string
+
+ charm_func() is executed after any services are stopped, if supplied.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param assess_status_func: (f() -> message: string | None) or None
+ @param services: OPTIONAL see above
+ @param ports: OPTIONAL list of port
+ @param charm_func: function to run for custom charm pausing.
+ @returns None
+ @raises Exception(message) on an error for action_fail().
+ """
+ services = _extract_services_list_helper(services)
+ messages = []
+ if services:
+ for service in services.keys():
+ stopped = service_pause(service)
+ if not stopped:
+ messages.append("{} didn't stop cleanly.".format(service))
+ if charm_func:
+ try:
+ message = charm_func()
+ if message:
+ messages.append(message)
+ except Exception as e:
+ message.append(str(e))
+ set_unit_paused()
+ if assess_status_func:
+ message = assess_status_func()
+ if message:
+ messages.append(message)
+ if messages:
+ raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
+def resume_unit(assess_status_func, services=None, ports=None,
+ charm_func=None):
+ """Resume a unit by starting the services and clearning 'unit-paused'
+ in the local kv() store.
+
+ Also checks that the services have started and ports are being listened to.
+
+ An optional charm_func() can be called that can either raise an
+ Exception or return non None to indicate that the unit
+ didn't resume cleanly.
+
+ The signature for charm_func is:
+ charm_func() -> message: string
+
+ charm_func() is executed after any services are started, if supplied.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param assess_status_func: (f() -> message: string | None) or None
+ @param services: OPTIONAL see above
+ @param ports: OPTIONAL list of port
+ @param charm_func: function to run for custom charm resuming.
+ @returns None
+ @raises Exception(message) on an error for action_fail().
+ """
+ services = _extract_services_list_helper(services)
+ messages = []
+ if services:
+ for service in services.keys():
+ started = service_resume(service)
+ if not started:
+ messages.append("{} didn't start cleanly.".format(service))
+ if charm_func:
+ try:
+ message = charm_func()
+ if message:
+ messages.append(message)
+ except Exception as e:
+ message.append(str(e))
+ clear_unit_paused()
+ if assess_status_func:
+ message = assess_status_func()
+ if message:
+ messages.append(message)
+ if messages:
+ raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
+def make_assess_status_func(*args, **kwargs):
+ """Creates an assess_status_func() suitable for handing to pause_unit()
+ and resume_unit().
+
+ This uses the _determine_os_workload_status(...) function to determine
+ what the workload_status should be for the unit. If the unit is
+ not in maintenance or active states, then the message is returned to
+ the caller. This is so an action that doesn't result in either a
+ complete pause or complete resume can signal failure with an action_fail()
+ """
+ def _assess_status_func():
+ state, message = _determine_os_workload_status(*args, **kwargs)
+ status_set(state, message)
+ if state not in ['maintenance', 'active']:
+ return message
+ return None
+
+ return _assess_status_func
+
+
+def pausable_restart_on_change(restart_map, stopstart=False,
+                               restart_functions=None):
+    """A restart_on_change decorator that checks to see if the unit is
+    paused. If it is paused then the decorated function doesn't fire.
+
+    This is provided as a helper, as the @restart_on_change(...) decorator
+    is in core.host, yet the openstack specific helpers are in this file
+    (contrib.openstack.utils).  Thus, this needs to be an optional feature
+    for openstack charms (or charms that wish to use the openstack
+    pause/resume type features).
+
+    It is used as follows:
+
+        from contrib.openstack.utils import (
+            pausable_restart_on_change as restart_on_change)
+
+        @restart_on_change(restart_map, stopstart=<boolean>)
+        def some_hook(...):
+            pass
+
+    see core.host.restart_on_change() for more details.
+
+    @param restart_map: the restart map {conf_file: [services]}
+    @param stopstart: DEFAULT false; whether to stop, start or just restart
+    @param restart_functions: OPTIONAL {service: restart_fn} overrides
+    @returns decorator to use a restart_on_change with pausability
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            # a paused unit must not restart services - just run f directly
+            if is_unit_paused_set():
+                return f(*args, **kwargs)
+            # otherwise, normal restart_on_change functionality
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
new file mode 100644
index 0000000..eafca44
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
@@ -0,0 +1,269 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import six
+
+from charmhelpers.core.hookenv import relation_id as current_relation_id
+from charmhelpers.core.hookenv import (
+ is_relation_made,
+ relation_ids,
+ relation_get as _relation_get,
+ local_unit,
+ relation_set as _relation_set,
+ leader_get as _leader_get,
+ leader_set,
+ is_leader,
+)
+
+
+"""
+This helper provides functions to support use of a peer relation
+for basic key/value storage, with the added benefit that all storage
+can be replicated across peer units.
+
+Requirement to use:
+
+To use this, the "peer_echo()" method has to be called form the peer
+relation's relation-changed hook:
+
+@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
+def cluster_relation_changed():
+ peer_echo()
+
+Once this is done, you can use peer storage from anywhere:
+
+@hooks.hook("some-hook")
+def some_hook():
+ # You can store and retrieve key/values this way:
+ if is_relation_made("cluster"): # from charmhelpers.core.hookenv
+ # There are peers available so we can work with peer storage
+ peer_store("mykey", "myvalue")
+ value = peer_retrieve("mykey")
+ print value
+ else:
+ print "No peers joind the relation, cannot share key/values :("
+"""
+
+
+def leader_get(attribute=None, rid=None):
+    """Wrapper to ensure that settings are migrated from the peer relation.
+
+    This is to support upgrading an environment that does not support
+    Juju leadership election to one that does.
+
+    If a setting is not extant in the leader-get but is on the relation-get
+    peer rel, it is migrated and marked as such so that it is not re-migrated.
+    """
+    # Bookkeeping key: JSON list of setting names already copied from the
+    # peer relation into the leader db, so they are not re-migrated.
+    migration_key = '__leader_get_migrated_settings__'
+    # Only the leader may call leader-set; followers just read.
+    if not is_leader():
+        return _leader_get(attribute=attribute)
+
+    settings_migrated = False
+    leader_settings = _leader_get(attribute=attribute)
+    previously_migrated = _leader_get(attribute=migration_key)
+
+    if previously_migrated:
+        migrated = set(json.loads(previously_migrated))
+    else:
+        migrated = set([])
+
+    # With no attribute requested, leader_settings is a dict that may
+    # contain our bookkeeping key; hide it from callers.  'in' raises
+    # TypeError when leader_settings is a plain value or None.
+    try:
+        if migration_key in leader_settings:
+            del leader_settings[migration_key]
+    except TypeError:
+        pass
+
+    if attribute:
+        # Already migrated once - the leader db value is canonical.
+        if attribute in migrated:
+            return leader_settings
+
+        # If attribute not present in leader db, check if this unit has set
+        # the attribute in the peer relation
+        if not leader_settings:
+            peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
+                                         rid=rid)
+            if peer_setting:
+                leader_set(settings={attribute: peer_setting})
+                leader_settings = peer_setting
+
+        if leader_settings:
+            settings_migrated = True
+            migrated.add(attribute)
+    else:
+        # Whole-dict request: copy over any not-yet-migrated peer keys.
+        r_settings = _relation_get(unit=local_unit(), rid=rid)
+        if r_settings:
+            for key in set(r_settings.keys()).difference(migrated):
+                # Leader setting wins
+                if not leader_settings.get(key):
+                    leader_settings[key] = r_settings[key]
+
+                settings_migrated = True
+                migrated.add(key)
+
+            if settings_migrated:
+                leader_set(**leader_settings)
+
+    # Persist the updated bookkeeping so keys are not copied again.
+    if migrated and settings_migrated:
+        migrated = json.dumps(list(migrated))
+        leader_set(settings={migration_key: migrated})
+
+    return leader_settings
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+ """Attempt to use leader-set if supported in the current version of Juju,
+ otherwise falls back on relation-set.
+
+ Note that we only attempt to use leader-set if the provided relation_id is
+ a peer relation id or no relation id is provided (in which case we assume
+ we are within the peer relation context).
+ """
+ try:
+ if relation_id in relation_ids('cluster'):
+ return leader_set(settings=relation_settings, **kwargs)
+ else:
+ raise NotImplementedError
+ except NotImplementedError:
+ return _relation_set(relation_id=relation_id,
+ relation_settings=relation_settings, **kwargs)
+
+
+def relation_get(attribute=None, unit=None, rid=None):
+ """Attempt to use leader-get if supported in the current version of Juju,
+ otherwise falls back on relation-get.
+
+ Note that we only attempt to use leader-get if the provided rid is a peer
+ relation id or no relation id is provided (in which case we assume we are
+ within the peer relation context).
+ """
+ try:
+ if rid in relation_ids('cluster'):
+ return leader_get(attribute, rid)
+ else:
+ raise NotImplementedError
+ except NotImplementedError:
+ return _relation_get(attribute=attribute, rid=rid, unit=unit)
+
+
+def peer_retrieve(key, relation_name='cluster'):
+ """Retrieve a named key from peer relation `relation_name`."""
+ cluster_rels = relation_ids(relation_name)
+ if len(cluster_rels) > 0:
+ cluster_rid = cluster_rels[0]
+ return relation_get(attribute=key, rid=cluster_rid,
+ unit=local_unit())
+ else:
+ raise ValueError('Unable to detect'
+ 'peer relation {}'.format(relation_name))
+
+
+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
+ inc_list=None, exc_list=None):
+ """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
+ inc_list = inc_list if inc_list else []
+ exc_list = exc_list if exc_list else []
+ peerdb_settings = peer_retrieve('-', relation_name=relation_name)
+ matched = {}
+ if peerdb_settings is None:
+ return matched
+ for k, v in peerdb_settings.items():
+ full_prefix = prefix + delimiter
+ if k.startswith(full_prefix):
+ new_key = k.replace(full_prefix, '')
+ if new_key in exc_list:
+ continue
+ if new_key in inc_list or len(inc_list) == 0:
+ matched[new_key] = v
+ return matched
+
+
+def peer_store(key, value, relation_name='cluster'):
+ """Store the key/value pair on the named peer relation `relation_name`."""
+ cluster_rels = relation_ids(relation_name)
+ if len(cluster_rels) > 0:
+ cluster_rid = cluster_rels[0]
+ relation_set(relation_id=cluster_rid,
+ relation_settings={key: value})
+ else:
+ raise ValueError('Unable to detect '
+ 'peer relation {}'.format(relation_name))
+
+
+def peer_echo(includes=None, force=False):
+    """Echo filtered attributes back onto the same relation for storage.
+
+    This is a requirement to use the peerstorage module - it needs to be called
+    from the peer relation's changed hook.
+
+    If Juju leader support exists this will be a noop unless force is True.
+    """
+    try:
+        is_leader()
+    except NotImplementedError:
+        # Pre-leadership Juju: fall through and perform the echo.
+        pass
+    else:
+        if not force:
+            return  # NOOP if leader-election is supported
+
+    # Use original non-leader calls
+    relation_get = _relation_get
+    relation_set = _relation_set
+
+    rdata = relation_get()
+    echo_data = {}
+    if includes is None:
+        echo_data = rdata.copy()
+        # Addresses are unit-specific and must never be echoed back.
+        for ex in ['private-address', 'public-address']:
+            if ex in echo_data:
+                echo_data.pop(ex)
+    else:
+        # Echo only attributes whose name contains one of the includes.
+        for attribute, value in six.iteritems(rdata):
+            for include in includes:
+                if include in attribute:
+                    echo_data[attribute] = value
+    if len(echo_data) > 0:
+        relation_set(relation_settings=echo_data)
+
+
+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
+                       peer_store_fatal=False, relation_settings=None,
+                       delimiter='_', **kwargs):
+    """Store passed-in arguments both in argument relation and in peer storage.
+
+    It functions like doing relation_set() and peer_store() at the same time,
+    with the same data.
+
+    @param relation_id: the id of the relation to store the data on. Defaults
+                        to the current relation.
+    @param peer_relation_name: name of the peer relation to mirror data to.
+    @param peer_store_fatal: Set to True, the function will raise an exception
+                             should the peer storage not be available.
+    @param relation_settings: dict of settings to set on the relation.
+    @param delimiter: separator placed between the relation id prefix and
+                      each key when mirrored into peer storage."""
+
+    relation_settings = relation_settings if relation_settings else {}
+    relation_set(relation_id=relation_id,
+                 relation_settings=relation_settings,
+                 **kwargs)
+    if is_relation_made(peer_relation_name):
+        # Mirror every setting into peer storage, namespaced by relation
+        # id so settings from different relations cannot collide.
+        for key, value in six.iteritems(dict(list(kwargs.items()) +
+                                             list(relation_settings.items()))):
+            key_prefix = relation_id or current_relation_id()
+            peer_store(key_prefix + delimiter + key,
+                       value,
+                       relation_name=peer_relation_name)
+    else:
+        if peer_store_fatal:
+            raise ValueError('Unable to detect '
+                             'peer relation {}'.format(peer_relation_name))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
new file mode 100644
index 0000000..a2411c3
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def pip_execute(*args, **kwargs):
+ """Overriden pip_execute() to stop sys.path being changed.
+
+ The act of importing main from the pip module seems to cause add wheels
+ from the /usr/share/python-wheels which are installed by various tools.
+ This function ensures that sys.path remains the same after the call is
+ executed.
+ """
+ try:
+ _path = sys.path
+ try:
+ from pip import main as _pip_execute
+ except ImportError:
+ apt_update()
+ apt_install('python-pip')
+ from pip import main as _pip_execute
+ _pip_execute(*args, **kwargs)
+ finally:
+ sys.path = _path
+
+
+def parse_options(given, available):
+ """Given a set of options, check if available"""
+ for key, value in sorted(given.items()):
+ if not value:
+ continue
+ if key in available:
+ yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+ """Install a requirements file.
+
+ :param constraints: Path to pip constraints file.
+ http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+ """
+ command = ["install"]
+
+ available_options = ('proxy', 'src', 'log', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ command.append("-r {0}".format(requirements))
+ if constraints:
+ command.append("-c {0}".format(constraints))
+ log("Installing from file: {} with constraints {} "
+ "and options: {}".format(requirements, constraints, command))
+ else:
+ log("Installing from file: {} with options: {}".format(requirements,
+ command))
+ pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
+ """Install a python package"""
+ if venv:
+ venv_python = os.path.join(venv, 'bin/pip')
+ command = [venv_python, "install"]
+ else:
+ command = ["install"]
+
+ available_options = ('proxy', 'src', 'log', 'index-url', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ if upgrade:
+ command.append('--upgrade')
+
+ if isinstance(package, list):
+ command.extend(package)
+ else:
+ command.append(package)
+
+ log("Installing {} package with options: {}".format(package,
+ command))
+ if venv:
+ subprocess.check_call(command)
+ else:
+ pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+ """Uninstall a python package"""
+ command = ["uninstall", "-q", "-y"]
+
+ available_options = ('proxy', 'log', )
+ for option in parse_options(options, available_options):
+ command.append(option)
+
+ if isinstance(package, list):
+ command.extend(package)
+ else:
+ command.append(package)
+
+ log("Uninstalling {} package with options: {}".format(package,
+ command))
+ pip_execute(command)
+
+
+def pip_list():
+    """Returns the list of current python installed packages
+
+    NOTE(review): pip_execute() has no return statement, so this appears
+    to return None with the listing going to stdout - confirm whether any
+    caller relies on the return value.
+    """
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+ """Create an isolated Python environment."""
+ apt_install('python-virtualenv')
+
+ if path:
+ venv_path = path
+ else:
+ venv_path = os.path.join(charm_dir(), 'venv')
+
+ if not os.path.exists(venv_path):
+ subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
new file mode 100644
index 0000000..d008081
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
@@ -0,0 +1,1206 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+# James Page <james.page@ubuntu.com>
+# Adam Gandelman <adamg@ubuntu.com>
+#
+import bisect
+import errno
+import hashlib
+import six
+
+import os
+import shutil
+import json
+import time
+import uuid
+
+from subprocess import (
+ check_call,
+ check_output,
+ CalledProcessError,
+)
+from charmhelpers.core.hookenv import (
+ local_unit,
+ relation_get,
+ relation_ids,
+ relation_set,
+ related_units,
+ log,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+from charmhelpers.core.host import (
+ mount,
+ mounts,
+ service_start,
+ service_stop,
+ service_running,
+ umount,
+)
+from charmhelpers.fetch import (
+ apt_install,
+)
+
+from charmhelpers.core.kernel import modprobe
+
+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
+KEYFILE = '/etc/ceph/ceph.client.{}.key'
+
+CEPH_CONF = """[global]
+auth supported = {auth}
+keyring = {keyring}
+mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
+"""
+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
+
+
+def validator(value, valid_type, valid_range=None):
+ """
+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+ Example input:
+ validator(value=1,
+ valid_type=int,
+ valid_range=[0, 2])
+ This says I'm testing value=1. It must be an int inclusive in [0,2]
+
+ :param value: The value to validate
+ :param valid_type: The type that value should be.
+ :param valid_range: A range of values that value can assume.
+ :return:
+ """
+ assert isinstance(value, valid_type), "{} is not a {}".format(
+ value,
+ valid_type)
+ if valid_range is not None:
+ assert isinstance(valid_range, list), \
+ "valid_range must be a list, was given {}".format(valid_range)
+ # If we're dealing with strings
+ if valid_type is six.string_types:
+ assert value in valid_range, \
+ "{} is not in the list {}".format(value, valid_range)
+ # Integer, float should have a min and max
+ else:
+ if len(valid_range) != 2:
+ raise ValueError(
+ "Invalid valid_range list of {} for {}. "
+ "List must be [min,max]".format(valid_range, value))
+ assert value >= valid_range[0], \
+ "{} is less than minimum allowed value of {}".format(
+ value, valid_range[0])
+ assert value <= valid_range[1], \
+ "{} is greater than maximum allowed value of {}".format(
+ value, valid_range[1])
+
+
+class PoolCreationError(Exception):
+ """
+ A custom error to inform the caller that a pool creation failed. Provides an error message
+ """
+
+ def __init__(self, message):
+ super(PoolCreationError, self).__init__(message)
+
+
+class Pool(object):
+    """
+    An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
+    Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
+    """
+
+    def __init__(self, service, name):
+        # service: the Ceph user name (--id) commands are run as.
+        # name: the pool name.
+        self.service = service
+        self.name = name
+
+    # Create the pool if it doesn't exist already
+    # To be implemented by subclasses
+    def create(self):
+        pass
+
+    def add_cache_tier(self, cache_pool, mode):
+        """
+        Adds a new cache tier to an existing pool.
+        :param cache_pool: six.string_types. The cache tier pool name to add.
+        :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
+        :return: None
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+        # Order matters: add the tier, set its mode, route I/O through it,
+        # then enable bloom-filter hit-set tracking on the cache pool.
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+    def remove_cache_tier(self, cache_pool):
+        """
+        Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
+        :param cache_pool: six.string_types. The cache tier pool name to remove.
+        :return: None
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(self.service, cache_pool)
+        version = ceph_version()
+        if mode == 'readonly':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+        elif mode == 'writeback':
+            pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
+                                'cache-mode', cache_pool, 'forward']
+            # NOTE(review): this is a lexicographic string comparison, not a
+            # version comparison - e.g. '9.2' >= '10.1' is True.  Confirm
+            # what ceph_version() returns and whether this misfires.
+            if version >= '10.1':
+                # Jewel added a mandatory flag
+                pool_forward_cmd.append('--yes-i-really-mean-it')
+
+            check_call(pool_forward_cmd)
+            # Flush the cache and wait for it to return
+            check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size):
+        """
+        :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
+            erasure coded pools
+        :return: int. The number of pgs to use.
+        """
+        validator(value=pool_size, valid_type=int)
+        osd_list = get_osds(self.service)
+        if not osd_list:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return 200
+
+        osd_list_length = len(osd_list)
+        # Calculate based on Ceph best practices
+        # NOTE(review): OSD counts of exactly 5, 10 or 50 fall through to
+        # the estimate branch because these comparisons are strict -
+        # confirm whether that is intentional.
+        if osd_list_length < 5:
+            return 128
+        elif 5 < osd_list_length < 10:
+            return 512
+        elif 10 < osd_list_length < 50:
+            return 4096
+        else:
+            estimate = (osd_list_length * 100) / pool_size
+            # Return the next nearest power of 2
+            # NOTE(review): if the estimate exceeds the largest entry of
+            # powers_of_two, bisect_right returns len(powers_of_two) and
+            # the index below raises IndexError - verify the upper bound.
+            index = bisect.bisect_right(powers_of_two, estimate)
+            return powers_of_two[index]
+
+
+class ReplicatedPool(Pool):
+ def __init__(self, service, name, pg_num=None, replicas=2):
+ super(ReplicatedPool, self).__init__(service=service, name=name)
+ self.replicas = replicas
+ if pg_num is None:
+ self.pg_num = self.get_pgs(self.replicas)
+ else:
+ self.pg_num = pg_num
+
+ def create(self):
+ if not pool_exists(self.service, self.name):
+ # Create it
+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+ self.name, str(self.pg_num)]
+ try:
+ check_call(cmd)
+ # Set the pool replica size
+ update_pool(client=self.service,
+ pool=self.name,
+ settings={'size': str(self.replicas)})
+ except CalledProcessError:
+ raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+ def __init__(self, service, name, erasure_code_profile="default"):
+ super(ErasurePool, self).__init__(service=service, name=name)
+ self.erasure_code_profile = erasure_code_profile
+
+ def create(self):
+ if not pool_exists(self.service, self.name):
+ # Try to find the erasure profile information so we can properly size the pgs
+ erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
+
+ # Check for errors
+ if erasure_profile is None:
+ log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
+ level=ERROR)
+ raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
+ # Error
+ log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
+ level=ERROR)
+ raise PoolCreationError(
+ message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
+
+ pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
+ # Create it
+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
+ 'erasure', self.erasure_code_profile]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+ """Get an existing erasure code profile if it already exists.
+ Returns json formatted output"""
+
+
def get_mon_map(service):
    """Return the current monitor map, parsed from json.

    :param service: six.string_types. The Ceph user name to run the command under
    :return: dict parsed from the ``ceph mon_status`` json output.
    :raise: ValueError if the monmap fails to parse. Also raises
        CalledProcessError if our ceph command fails.
    """
    try:
        # decode: check_output returns bytes on python3, and json.loads
        # requires text on python < 3.6
        mon_status = check_output(
            ['ceph', '--id', service,
             'mon_status', '--format=json']).decode('UTF-8')
        try:
            return json.loads(mon_status)
        except ValueError as v:
            # BaseException.message does not exist on python3; use str()
            log("Unable to parse mon_status json: {}. Error: {}".format(
                mon_status, str(v)))
            raise
    except CalledProcessError as e:
        log("mon_status command failed with message: {}".format(
            str(e)))
        raise
+
+
def hash_monitor_names(service):
    """Return sorted sha224 hashes of the monitor names in the cluster.

    Uses get_mon_map() to obtain monitor cluster information, hashes each
    monitor name, and returns the hashes sorted in ascending order.

    :param service: six.string_types. The Ceph user name to run the command under
    :return: sorted list of hex digests, or None when the monmap lists no
        monitors. Propagates ValueError / CalledProcessError from
        get_mon_map().
    """
    monitor_map = get_mon_map(service=service)
    mons = monitor_map['monmap']['mons']
    if not mons:
        return None
    return sorted(hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()
                  for mon in mons)
+
+
def monitor_key_delete(service, key):
    """Delete a key and value pair from the monitor cluster's config-key store.

    :param service: six.string_types. The Ceph user name to run the command under
    :param key: six.string_types. The key to delete.
    :raise: CalledProcessError if the ceph command fails.
    """
    try:
        check_output(
            ['ceph', '--id', service,
             'config-key', 'del', str(key)])
    except CalledProcessError as e:
        # Fixed: this previously logged "put", misattributing the failure
        log("Monitor config-key del failed with message: {}".format(
            e.output))
        raise
+
+
def monitor_key_set(service, key, value):
    """
    Sets a key value pair on the monitor cluster.
    :param service: six.string_types. The Ceph user name to run the command under
    :param key: six.string_types. The key to set.
    :param value: The value to set. This will be converted to a string
        before setting
    :raise: CalledProcessError if the ceph command fails.
    """
    try:
        check_output(
            ['ceph', '--id', service,
             'config-key', 'put', str(key), str(value)])
    except CalledProcessError as e:
        log("Monitor config-key put failed with message: {}".format(
            e.output))
        raise
+
+
def monitor_key_get(service, key):
    """
    Gets the value of an existing key in the monitor cluster.
    :param service: six.string_types. The Ceph user name to run the command under
    :param key: six.string_types. The key to search for.
    :return: Returns the value of that key (text) or None if not found.
    """
    try:
        # decode: check_output returns bytes on python3; return text for
        # consistency with the other helpers in this file (e.g. pool_exists)
        output = check_output(
            ['ceph', '--id', service,
             'config-key', 'get', str(key)]).decode('UTF-8')
        return output
    except CalledProcessError as e:
        log("Monitor config-key get failed with message: {}".format(
            e.output))
        return None
+
+
def monitor_key_exists(service, key):
    """
    Searches for the existence of a key in the monitor cluster.
    :param service: six.string_types. The Ceph user name to run the command under
    :param key: six.string_types. The key to search for
    :return: Returns True if the key exists, False if not and raises an
        exception if an unknown error occurs. :raise: CalledProcessError if
        an unknown error occurs
    """
    try:
        check_call(
            ['ceph', '--id', service,
             'config-key', 'exists', str(key)])
        # I can return true here regardless because Ceph returns
        # ENOENT if the key wasn't found
        return True
    except CalledProcessError as e:
        # 'ceph config-key exists' exits with ENOENT for a missing key,
        # so that case maps to False rather than an error
        if e.returncode == errno.ENOENT:
            return False
        else:
            log("Unknown error from ceph config-get exists: {} {}".format(
                e.returncode, e.output))
            raise
+
+
def get_erasure_profile(service, name):
    """Get an existing erasure code profile if it exists.

    :param service: six.string_types. The Ceph user name to run the command under
    :param name: six.string_types. The erasure-code-profile name to look up.
    :return: dict parsed from the profile's json output, or None if the
        profile is missing or the output cannot be parsed.
    """
    try:
        # decode: check_output returns bytes on python3; json.loads needs
        # text on python < 3.6 (the old code silently returned None there)
        out = check_output(['ceph', '--id', service,
                            'osd', 'erasure-code-profile', 'get',
                            name, '--format=json']).decode('UTF-8')
        return json.loads(out)
    except (CalledProcessError, OSError, ValueError):
        return None
+
+
def pool_set(service, pool_name, key, value):
    """
    Sets a value for a RADOS pool in ceph.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param key: six.string_types
    :param value:
    :return: None. Can raise CalledProcessError
    """
    # CalledProcessError propagates to the caller unchanged
    check_call(['ceph', '--id', service, 'osd', 'pool', 'set',
                pool_name, key, value])
+
+
def snapshot_pool(service, pool_name, snapshot_name):
    """
    Snapshots a RADOS pool in ceph.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param snapshot_name: six.string_types
    :return: None. Can raise CalledProcessError
    """
    # CalledProcessError propagates to the caller unchanged
    check_call(['ceph', '--id', service, 'osd', 'pool', 'mksnap',
                pool_name, snapshot_name])
+
+
def remove_pool_snapshot(service, pool_name, snapshot_name):
    """
    Remove a snapshot from a RADOS pool in ceph.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param snapshot_name: six.string_types
    :return: None. Can raise CalledProcessError
    """
    # CalledProcessError propagates to the caller unchanged
    check_call(['ceph', '--id', service, 'osd', 'pool', 'rmsnap',
                pool_name, snapshot_name])
+
+
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
    """
    Set a byte quota on a RADOS pool in ceph.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :param max_bytes: int or long
    :return: None. Can raise CalledProcessError
    """
    # Set a byte quota on a RADOS pool in ceph.
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
           'max_bytes', str(max_bytes)]
    try:
        check_call(cmd)
    except CalledProcessError:
        raise
+
+
def remove_pool_quota(service, pool_name):
    """
    Remove the byte quota on a RADOS pool in ceph by resetting max_bytes
    to 0 (unlimited).
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :return: None. Can raise CalledProcessError
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
    try:
        check_call(cmd)
    except CalledProcessError:
        raise
+
+
def remove_erasure_profile(service, profile_name):
    """
    Remove an erasure code profile. Please see
    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
    for more details
    :param service: six.string_types. The Ceph user name to run the command under
    :param profile_name: six.string_types
    :return: None. Can raise CalledProcessError
    """
    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
           profile_name]
    try:
        check_call(cmd)
    except CalledProcessError:
        raise
+
+
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
                           failure_domain='host',
                           data_chunks=2, coding_chunks=1,
                           locality=None, durability_estimator=None):
    """
    Create a new erasure code profile if one does not already exist for it. Updates
    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
    for more details
    :param service: six.string_types. The Ceph user name to run the command under
    :param profile_name: six.string_types
    :param erasure_plugin_name: six.string_types
    :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
        'room', 'root', 'row'])
    :param data_chunks: int
    :param coding_chunks: int
    :param locality: int (LRC plugin only; mutually exclusive with
        durability_estimator)
    :param durability_estimator: int (SHEC plugin only; mutually exclusive
        with locality)
    :return: None. Can raise CalledProcessError or ValueError
    """
    # Ensure this failure_domain is allowed by Ceph
    validator(failure_domain, six.string_types,
              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])

    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
           'ruleset_failure_domain=' + failure_domain]
    # l (locality) and c (durability estimator) belong to different plugins,
    # so passing both is a caller error
    if locality is not None and durability_estimator is not None:
        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")

    # Add plugin specific information
    if locality is not None:
        # For local erasure codes
        cmd.append('l=' + str(locality))
    if durability_estimator is not None:
        # For Shec erasure codes
        cmd.append('c=' + str(durability_estimator))

    # --force allows overwriting an existing profile (update semantics)
    if erasure_profile_exists(service, profile_name):
        cmd.append('--force')

    try:
        check_call(cmd)
    except CalledProcessError:
        raise
+
+
def rename_pool(service, old_name, new_name):
    """
    Rename a Ceph pool from old_name to new_name
    :param service: six.string_types. The Ceph user name to run the command under
    :param old_name: six.string_types
    :param new_name: six.string_types
    :return: None. Can raise CalledProcessError or ValueError
    """
    validator(value=old_name, valid_type=six.string_types)
    validator(value=new_name, valid_type=six.string_types)

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
    check_call(cmd)
+
+
def erasure_profile_exists(service, name):
    """
    Check to see if an Erasure code profile already exists.
    :param service: six.string_types. The Ceph user name to run the command under
    :param name: six.string_types
    :return: bool. True if the profile exists, False otherwise.
    """
    validator(value=name, valid_type=six.string_types)
    try:
        check_call(['ceph', '--id', service,
                    'osd', 'erasure-code-profile', 'get',
                    name])
        return True
    except CalledProcessError:
        return False
+
+
def get_cache_mode(service, pool_name):
    """
    Find the current caching mode of the pool_name given.
    :param service: six.string_types. The Ceph user name to run the command under
    :param pool_name: six.string_types
    :return: the pool's cache mode, or None if the pool is not found.
    :raise: ValueError if the osd dump output cannot be parsed;
        CalledProcessError if the ceph command fails.
    """
    validator(value=service, valid_type=six.string_types)
    validator(value=pool_name, valid_type=six.string_types)
    # decode: check_output returns bytes on python3; json.loads requires
    # text on python < 3.6
    out = check_output(['ceph', '--id', service,
                        'osd', 'dump', '--format=json']).decode('UTF-8')
    # A parse failure (ValueError) propagates unchanged; the previous
    # 'except ValueError: raise' wrapper was a no-op.
    osd_json = json.loads(out)
    for pool in osd_json['pools']:
        if pool['pool_name'] == pool_name:
            return pool['cache_mode']
    return None
+
+
def pool_exists(service, name):
    """Check to see if a RADOS pool already exists."""
    try:
        pools = check_output(['rados', '--id', service,
                              'lspools']).decode('UTF-8').split()
    except CalledProcessError:
        return False
    return name in pools
+
+
def get_osds(service):
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster.

    :param service: The Ceph user name to run the command under.
    :return: list of OSD ids, or None when the local ceph version is
        unknown or predates 'osd ls' support (0.56).
    """
    version = ceph_version()
    # NOTE(review): lexicographic string comparison — adequate for the
    # pre/post-0.56 split this guards, but would misorder versions like
    # '0.100'; confirm before relying on it for newer releases.
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None
+
+
def install():
    """Basic Ceph client installation.

    Ensures /etc/ceph exists and installs the ceph-common package;
    apt_install(fatal=True) raises if the install fails.
    """
    ceph_dir = "/etc/ceph"
    if not os.path.exists(ceph_dir):
        os.mkdir(ceph_dir)

    apt_install('ceph-common', fatal=True)
+
+
def rbd_exists(service, pool, rbd_img):
    """Check to see if a RADOS block device exists."""
    try:
        listing = check_output(['rbd', 'list', '--id',
                                service, '--pool', pool]).decode('UTF-8')
    except CalledProcessError:
        return False
    return rbd_img in listing
+
+
def create_rbd_image(service, pool, image, sizemb):
    """Create a new RADOS block device.

    :param service: The Ceph user name to run the command under.
    :param pool: Pool to create the image in.
    :param image: Name of the new image.
    :param sizemb: Image size in megabytes.
    :raise: CalledProcessError if the rbd command fails.
    """
    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
           '--pool', pool]
    check_call(cmd)
+
+
def update_pool(client, pool, settings):
    """Apply 'ceph osd pool set' for each key/value pair in settings.

    :param client: The Ceph user name to run the command under.
    :param pool: Pool name.
    :param settings: dict mapping pool setting name to value (strings).
    :raise: CalledProcessError if the ceph command fails.
    """
    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
    for k, v in six.iteritems(settings):
        cmd.append(k)
        cmd.append(v)

    check_call(cmd)
+
+
def create_pool(service, name, replicas=3, pg_num=None):
    """Create a new RADOS pool.

    :param service: The Ceph user name to run the command under.
    :param name: Pool name.
    :param replicas: Desired replica count (pool 'size' setting).
    :param pg_num: Placement group count; computed from the OSD count
        when not supplied.
    """
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return

    if not pg_num:
        # Calculate the number of placement groups based
        # on upstream recommended best practices.
        osds = get_osds(service)
        if osds:
            pg_num = (len(osds) * 100 // replicas)
        else:
            # NOTE(james-page): Default to 200 for older ceph versions
            # which don't support OSD query from cli
            pg_num = 200

    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
    check_call(cmd)

    update_pool(service, name, settings={'size': str(replicas)})
+
+
def delete_pool(service, name):
    """Delete a RADOS pool from ceph.

    Destructive: permanently removes the pool and all of its data
    (the --yes-i-really-really-mean-it flag bypasses ceph's safety prompt).
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
           '--yes-i-really-really-mean-it']
    check_call(cmd)
+
+
def _keyfile_path(service):
    # Path of the key file for `service`; KEYFILE is a module-level
    # template defined earlier in this file (outside this view).
    return KEYFILE.format(service)
+
+
def _keyring_path(service):
    # Path of the keyring for `service`; KEYRING is a module-level
    # template defined earlier in this file (outside this view).
    return KEYRING.format(service)
+
+
def create_keyring(service, key):
    """Create a new Ceph keyring containing key.

    No-op (with a warning) if the keyring file already exists.

    :param service: Client name the keyring is created for.
    :param key: The ceph key material to store.
    :raise: CalledProcessError if ceph-authtool fails.
    """
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
        return

    cmd = ['ceph-authtool', keyring, '--create-keyring',
           '--name=client.{}'.format(service), '--add-key={}'.format(key)]
    check_call(cmd)
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+
+
def delete_keyring(service):
    """Delete an existing Ceph keyring.

    No-op (with a warning) if the keyring file does not exist.
    """
    keyring = _keyring_path(service)
    if not os.path.exists(keyring):
        log('Keyring does not exist at %s' % keyring, level=WARNING)
        return

    os.remove(keyring)
    log('Deleted ring at %s.' % keyring, level=INFO)
+
+
def create_key_file(service, key):
    """Create a file containing key.

    No-op (with a warning) if the keyfile already exists.

    :param service: Client name the keyfile belongs to.
    :param key: The ceph key material to write.
    """
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        log('Keyfile exists at %s.' % keyfile, level=WARNING)
        return

    with open(keyfile, 'w') as fd:
        fd.write(key)

    log('Created new keyfile at %s.' % keyfile, level=INFO)
+
+
def get_ceph_nodes(relation='ceph'):
    """Query named relation to determine current nodes."""
    return [relation_get('private-address', unit=unit, rid=rid)
            for rid in relation_ids(relation)
            for unit in related_units(rid)]
+
+
def configure(service, key, auth, use_syslog):
    """Perform basic configuration of Ceph.

    Writes the keyring/keyfile for `service`, renders /etc/ceph/ceph.conf
    from the CEPH_CONF template with the current mon hosts, and loads the
    rbd kernel module.
    """
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF.format(auth=auth,
                                         keyring=_keyring_path(service),
                                         mon_hosts=",".join(map(str, hosts)),
                                         use_syslog=use_syslog))
    modprobe('rbd')
+
+
def image_mapped(name):
    """Determine whether a RADOS block device is mapped locally."""
    try:
        mapped = check_output(['rbd', 'showmapped']).decode('UTF-8')
    except CalledProcessError:
        return False
    return name in mapped
+
+
def map_block_storage(service, pool, image):
    """Map a RADOS block device for local use.

    :param service: The Ceph user name to authenticate as.
    :param pool: Pool containing the image.
    :param image: Image name to map.
    :raise: CalledProcessError if the rbd map fails.
    """
    cmd = [
        'rbd',
        'map',
        '{}/{}'.format(pool, image),
        '--user',
        service,
        '--secret',
        _keyfile_path(service),
    ]
    check_call(cmd)
+
+
def filesystem_mounted(fs):
    """Determine whether a filesystem is already mounted."""
    return any(mountpoint == fs for mountpoint, _ in mounts())
+
+
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    """Make a new filesystem on the specified block device.

    Waits up to `timeout` seconds (polling once per second) for the device
    node to appear before formatting it.

    :param blk_device: str: full path to the block device.
    :param fstype: str: filesystem type passed to mkfs -t.
    :param timeout: int: seconds to wait for the device to appear.
    :raise: IOError (ENOENT) if the device never appears;
        CalledProcessError if mkfs fails.
    """
    count = 0
    # Fixed: os.errno was removed in Python 3.6; use the errno module
    # directly (it is already used elsewhere in this file).
    e_noent = errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('Gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)

        log('Waiting for block device %s to appear' % blk_device,
            level=DEBUG)
        count += 1
        time.sleep(1)
    else:
        # while/else: runs when the loop exits normally (device present)
        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
+
+
def place_data_on_block_device(blk_device, data_src_dst):
    """Migrate data in data_src_dst to blk_device and then remount.

    :param blk_device: Block device to host the data.
    :param data_src_dst: Directory whose contents are migrated; becomes
        the final mount point of blk_device.
    """
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
+
+
def copy_files(src, dst, symlinks=False, ignore=None):
    """Recursively copy the contents of src into dst.

    Sub-directories are copied with shutil.copytree (the matching target
    directory must not already exist); regular files are copied with
    their metadata via shutil.copy2.
    """
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        if os.path.isdir(source):
            shutil.copytree(source, target, symlinks, ignore)
        else:
            shutil.copy2(source, target)
+
+
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[],
                        replicas=3):
    """NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param service: Ceph user name to run commands under.
    :param pool: Pool to hold the image; created if absent.
    :param rbd_img: RBD image name; created if absent.
    :param sizemb: Image size in megabytes (only used at creation).
    :param mount_point: Final mount point for the data.
    :param blk_device: Block device path the image maps to.
    :param fstype: Filesystem type for mkfs.
    :param system_services: Services to stop during migration.
    :param replicas: Replica count used if the pool is created.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('Creating new pool {}.'.format(pool), level=INFO)
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('Stopping services {} prior to migrating data.'
                    .format(svc), level=DEBUG)
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('Starting service {} after migrating data.'
                .format(svc), level=DEBUG)
            service_start(svc)
+
+
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    Scans all units on the named relation for a 'key' value and writes a
    keyring from the first one found.

    :param service: Client name the keyring is written for.
    :param user: Optional owner for chown of the keyring.
    :param group: Optional group for chown of the keyring.
    :param relation: Relation name to scan for the key.
    :return: False if no ceph key is available in relation state,
        True once the keyring exists.
    """
    key = None
    for rid in relation_ids(relation):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break

    if not key:
        return False

    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    # chown only applies when both user and group were supplied
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])

    return True
+
+
def ceph_version():
    """Retrieve the local version of ceph, or None if unavailable."""
    if not os.path.exists('/usr/bin/ceph'):
        return None
    # 'ceph -v' prints e.g. "ceph version 0.80.11 (...)"
    fields = check_output(['ceph', '-v']).decode('US-ASCII').split()
    if len(fields) > 3:
        return fields[2]
    return None
+
+
class CephBrokerRq(object):
    """Ceph broker request.

    Multiple operations can be added to a request and sent to the Ceph broker
    to be executed.

    Request is json-encoded for sending over the wire.

    The API is versioned and defaults to version 1.
    """

    def __init__(self, api_version=1, request_id=None):
        self.api_version = api_version
        # A fresh uuid is generated unless the caller supplies an id
        self.request_id = request_id if request_id else str(uuid.uuid1())
        self.ops = []

    def add_op_create_pool(self, name, replica_count=3, pg_num=None):
        """Adds an operation to create a pool.

        @param pg_num setting: optional setting. If not provided, this value
        will be calculated by the broker based on how many OSDs are in the
        cluster at the time of creation. Note that, if provided, this value
        will be capped at the current available maximum.
        """
        self.ops.append({'op': 'create-pool', 'name': name,
                         'replicas': replica_count, 'pg_num': pg_num})

    def set_ops(self, ops):
        """Set request ops to provided value.

        Useful for injecting ops that come from a previous request
        to allow comparisons to ensure validity.
        """
        self.ops = ops

    @property
    def request(self):
        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
                           'request-id': self.request_id})

    def _ops_equal(self, other):
        # Requests are equivalent when every op matches on the broker's
        # significant keys; request ids are deliberately not compared.
        if len(self.ops) != len(other.ops):
            return False
        keys = ('replicas', 'name', 'op', 'pg_num')
        return all(mine.get(key) == theirs.get(key)
                   for mine, theirs in zip(self.ops, other.ops)
                   for key in keys)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                self.api_version == other.api_version and
                self._ops_equal(other))

    def __ne__(self, other):
        return not self.__eq__(other)
+
+
class CephBrokerRsp(object):
    """Ceph broker response.

    Response is json-decoded and contents provided as methods/properties.

    The API is versioned and defaults to version 1.
    """

    def __init__(self, encoded_rsp):
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    def _field(self, name):
        # All response attributes are optional keys of the decoded payload
        return self.rsp.get(name)

    @property
    def request_id(self):
        return self._field('request-id')

    @property
    def exit_code(self):
        return self._field('exit-code')

    @property
    def exit_msg(self):
        return self._field('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+# send_request_if_needed,
+# is_request_complete,
+# CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+# rq = CephBrokerRq()
+# rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+# if is_request_complete(rq):
+# <Request complete actions>
+# else:
+# send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+# 'ceph:8': {
+# 'ceph/0': {
+# 'auth': 'cephx',
+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+# 'ceph-public-address': '10.5.44.103',
+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+# 'private-address': '10.5.44.103',
+# },
+# 'glance/0': {
+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+# '"ops": [{"replicas": 3, "name": "glance", '
+# '"op": "create-pool"}]}'),
+# 'private-address': '10.5.44.109',
+# },
+# }
+
def get_previous_request(rid):
    """Return the last ceph broker request sent on a given relation

    @param rid: Relation id to query for request
    """
    broker_req = relation_get(attribute='broker_req', rid=rid,
                              unit=local_unit())
    if not broker_req:
        return None
    data = json.loads(broker_req)
    previous = CephBrokerRq(api_version=data['api-version'],
                            request_id=data['request-id'])
    previous.set_ops(data['ops'])
    return previous
+
+
def get_request_states(request, relation='ceph'):
    """Return a dict of requests per relation id with their corresponding
    completion state.

    This allows a charm, which has a request for ceph, to see whether there is
    an equivalent request already being processed and if so what state that
    request is in.

    :param request: A CephBrokerRq object
    :param relation: Relation name to inspect.
    :return: dict mapping relation id -> {'sent': bool, 'complete': bool}
    """
    # Fixed: removed the dead 'complete = []' initialization that was
    # unconditionally shadowed inside the loop.
    requests = {}
    for rid in relation_ids(relation):
        previous_request = get_previous_request(rid)
        if request == previous_request:
            sent = True
            complete = is_request_complete_for_rid(previous_request, rid)
        else:
            sent = False
            complete = False

        requests[rid] = {
            'sent': sent,
            'complete': complete,
        }

    return requests
+
+
def is_request_sent(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been sent

    Returns True if a similar request has been sent

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['sent'] for state in states.values())
+
+
def is_request_complete(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been
    completed

    Returns True if a similar request has been completed

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['complete'] for state in states.values())
+
+
def is_request_complete_for_rid(request, rid):
    """Check if a given request has been completed on the given relation

    Prefers a unit-targeted response key (see get_broker_rsp_key()); falls
    back to the legacy shared 'broker_rsp' key for older ceph charms.

    @param request: A CephBrokerRq object
    @param rid: Relation ID
    :return: True if a matching response reports success (exit-code 0),
        False otherwise.
    """
    broker_key = get_broker_rsp_key()
    for unit in related_units(rid):
        rdata = relation_get(rid=rid, unit=unit)
        if rdata.get(broker_key):
            rsp = CephBrokerRsp(rdata.get(broker_key))
            if rsp.request_id == request.request_id:
                # exit-code 0 (or missing) is treated as success
                if not rsp.exit_code:
                    return True
        else:
            # The remote unit sent no reply targeted at this unit so either the
            # remote ceph cluster does not support unit targeted replies or it
            # has not processed our request yet.
            if rdata.get('broker_rsp'):
                request_data = json.loads(rdata['broker_rsp'])
                if request_data.get('request-id'):
                    log('Ignoring legacy broker_rsp without unit key as remote '
                        'service supports unit specific replies', level=DEBUG)
                else:
                    log('Using legacy broker_rsp as remote service does not '
                        'supports unit specific replies', level=DEBUG)
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
                    if not rsp.exit_code:
                        return True

    return False
+
+
def get_broker_rsp_key():
    """Return broker response key for this unit

    This is the key that ceph is going to use to pass request status
    information back to this unit
    """
    return 'broker-rsp-{}'.format(local_unit().replace('/', '-'))
+
+
def send_request_if_needed(request, relation='ceph'):
    """Send broker request if an equivalent request has not already been sent

    Sets the json-encoded request as 'broker_req' on every relation id of
    the named relation.

    @param request: A CephBrokerRq object
    """
    if is_request_sent(request, relation=relation):
        log('Request already sent but not complete, not sending new request',
            level=DEBUG)
    else:
        for rid in relation_ids(relation):
            log('Sending request {}'.format(request.request_id), level=DEBUG)
            relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 0000000..3a3f514
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,88 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from subprocess import (
+ check_call,
+ check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    # decode: check_output returns bytes on python3, which would break
    # the str comparison and ' '-split below
    output = check_output(cmd).decode('utf-8')
    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
    for dev, _, f in devs:
        # raw string so the regex escapes are not interpreted by python
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
    return loopbacks
+
+
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # 'losetup --find' does not report which device it allocated, so
    # re-scan the current mappings to locate ours.
    for d, f in six.iteritems(loopback_devices()):
        if f == file_path:
            return d
    # NOTE(review): implicitly returns None if the new mapping is not
    # found in the rescan — confirm callers tolerate that.
+
+
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If no loopback device is currently mapped to the file, a new one will
    be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for d, f in six.iteritems(loopback_devices()):
        if f == path:
            return d

    # Create a sparse backing file of the requested size if needed
    if not os.path.exists(path):
        cmd = ['truncate', '--size', size, path]
        check_call(cmd)

    return create_loopback(path)
+
+
def is_mapped_loopback_device(device):
    """
    Checks if a given device name is an existing/mapped loopback device.
    :param device: str: Full path to the device (eg, /dev/loop1).
    :returns: str: Path to the backing file if is a loopback device
    empty string otherwise
    """
    mappings = loopback_devices()
    return mappings.get(device, "")
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 0000000..34b5f71
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,105 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import (
+ CalledProcessError,
+ check_call,
+ check_output,
+ Popen,
+ PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    vg = list_lvm_volume_group(block_device)
    if vg:
        # 'vgchange -an' deactivates the volume group; skipped entirely
        # when the PV belongs to no group.
        cmd = ['vgchange', '-an', vg]
        check_call(cmd)
+
+
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    # pvdisplay exits non-zero for devices carrying no PV signature.
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
+
+
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    # pvremove prompts for confirmation; feed it 'y'. The payload must be
    # bytes: without universal_newlines the pipe is binary on Python 3,
    # so passing a str raises TypeError. A bytes literal works on 2 and 3.
    p.communicate(input=b'y\n')
+
+
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    vg_name = None
    for raw_line in check_output(['pvdisplay', block_device]).splitlines():
        # pvdisplay emits bytes; decode before text matching.
        text = raw_line.decode('UTF-8').strip()
        if text.startswith('VG Name'):
            # Everything after the two-word 'VG Name' label is the name.
            vg_name = ' '.join(text.split()[2:])
    return vg_name
+
+
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    cmd = ['pvcreate', block_device]
    check_call(cmd)
+
+
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes the block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :param block_device: str: Full path of PV-initialized block device.
    '''
    cmd = ['vgcreate', volume_group, block_device]
    check_call(cmd)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
new file mode 100644
index 0000000..4e35c29
--- /dev/null
+++ b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
@@ -0,0 +1,71 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from stat import S_ISBLK
+
from subprocess import (
    CalledProcessError,
    call,
    check_call,
    check_output,
)
+
+
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    try:
        mode = os.stat(path).st_mode
    except OSError:
        # Missing or unreadable path cannot be a block device.
        return False
    return S_ISBLK(mode)
+
+
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
    call(['sgdisk', '--zap-all', '--', block_device])
    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
    # Device size in 512-byte sectors; zero the final 100 sectors where
    # the backup GPT is stored.
    dev_end = check_output(['blockdev', '--getsz',
                            block_device]).decode('UTF-8')
    gpt_end = int(dev_end.split()[0]) - 100
    # Zero the first 1MiB (MBR + primary GPT header area) ...
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=1M', 'count=1'])
    # ... and the tail of the disk holding the backup GPT.
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+
def is_device_mounted(device):
    '''Given a device path, return True if that device is mounted, and False
    if it isn't.

    :param device: str: Full path of the device to check.
    :returns: boolean: True if the path represents a mounted device, False if
              it doesn't.
    '''
    try:
        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
    except (CalledProcessError, OSError):
        # Narrowed from a bare 'except:' (which also swallowed
        # SystemExit/KeyboardInterrupt): lsblk exits non-zero for unknown
        # devices, and OSError covers lsblk not being installed. Either
        # way the device cannot be mounted.
        return False
    return bool(re.search(r'MOUNTPOINT=".+"', out))