aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack
diff options
context:
space:
mode:
Diffstat (limited to 'charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack')
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py31
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py295
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py1010
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py1508
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py21
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py16
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py128
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py179
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py382
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py16
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py321
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py1891
15 files changed, 0 insertions, 5837 deletions
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index 1501641..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 6ce91db..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the ~openstack-charmers
- base_charms = {
- 'mysql': ['precise', 'trusty'],
- 'mongodb': ['precise', 'trusty'],
- 'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
- }
-
- for svc in other_services:
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if svc['name'] in base_charms:
- # NOTE: not all charms have support for all series we
- # want/need to test against, so fix to most recent
- # that each base charm supports
- target_series = self.series
- if self.series not in base_charms[svc['name']]:
- target_series = base_charms[svc['name']][-1]
- svc['location'] = 'cs:{}/{}'.format(target_series,
- svc['name'])
- elif self.stable:
- svc['location'] = 'cs:{}/{}'.format(self.series,
- svc['name'])
- else:
- svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
- self.series,
- svc['name']
- )
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index 8040b57..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1010 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 76737f2..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1508 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call, CalledProcessError
-
-import six
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
- status_set,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
- pwgen,
- lsb_release,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import (
- config_flags_parser,
- get_host_ip,
-)
-from charmhelpers.core.unitdata import kv
-
-try:
- import psutil
-except ImportError:
- apt_install('python-psutil', fatal=True)
- import psutil
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        ctxt = {'use_syslog': str(config('use-syslog')).lower()}
        mon_hosts = []
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                # Take auth/key from the first related unit that supplies them.
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
                # Prefer the dedicated ceph public address, falling back to
                # the unit's private address.
                mon_addr = (relation_get('ceph-public-address', rid=rid,
                                         unit=unit) or
                            relation_get('private-address', rid=rid,
                                         unit=unit))
                mon_hosts.append(format_ipv6_addr(mon_addr) or mon_addr)

        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
-
-
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.

    BUG FIX: the per-network-type peer scan is now nested inside the
    ``if laddr:`` guard. Previously it ran even when this unit had no
    address in the configured network, so ``cluster_hosts[laddr]`` (with
    ``laddr`` None and never initialised) raised KeyError as soon as a
    peer published an address for that network.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # singlenode_mode enables haproxy even without peers (useful for
        # single-unit deployments that still want the proxy in place).
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # with either be the only backend or the fallback if no acls
        # match in the frontend
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        # Optional operator-tunable haproxy timeouts.
        for timeout in ('server', 'client', 'queue', 'connect'):
            cfg_key = 'haproxy-{}-timeout'.format(timeout)
            if config(cfg_key):
                ctxt['haproxy_{}_timeout'.format(timeout)] = config(cfg_key)

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'

        ctxt['stat_port'] = '8888'

        # Persist a generated stats password across hook invocations.
        db = kv()
        ctxt['stat_password'] = db.get('stat-password')
        if not ctxt['stat_password']:
            ctxt['stat_password'] = db.set('stat-password',
                                           pwgen(32))
            db.flush()

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
-
-
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relation_list = relation_ids('image-service')
        if not relation_list:
            return {}

        # Return the first glance API server advertised by any related unit.
        for rid in relation_list:
            for unit in related_units(rid):
                server = relation_get('glance-api-server',
                                      rid=rid, unit=unit)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
-
-
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the base64-decoded cert/key pair for ``cn`` under
        /etc/apache2/ssl/<namespace>/.

        :param cn: canonical name the certificate was issued for; when None
                   a single unnamed cert/key pair is written.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate, if one is available."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # BUG FIX: was k.lstrip('ssl_key_'), which strips a
                        # *character set* rather than the prefix, mangling
                        # any CN starting with 's', 'l', '_', 'k', 'e' or
                        # 'y' (e.g. 'keystone' -> 'tone').
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

        or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple VIPs: pair each address with the vip that lives
                # in the same network.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
-
-
class NeutronContext(OSContextGenerator):
    """Base context for neutron/quantum plugin configuration.

    Subclasses are expected to override ``plugin``, ``network_manager``
    and (optionally) ``neutron_security_groups``; this class maps the
    selected plugin to a template context via ``neutron_plugin_attribute``.
    """
    interfaces = []

    @property
    def plugin(self):
        return None

    @property
    def network_manager(self):
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # ``packages`` is a list of package lists, hence the loop.
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the selected plugin for other hooks/tools to read."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # BUG FIX: file was opened 'wb' while writing a str, which raises
        # TypeError on Python 3; open in text mode instead.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def _plugin_ctxt(self, neutron_plugin):
        """Common context shared by all plugin-specific builders."""
        return {
            'core_plugin': neutron_plugin_attribute(self.plugin, 'driver',
                                                    self.network_manager),
            'neutron_plugin': neutron_plugin,
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager),
        }

    def ovs_ctxt(self):
        return self._plugin_ctxt('ovs')

    def nuage_ctxt(self):
        return self._plugin_ctxt('vsp')

    def nvp_ctxt(self):
        return self._plugin_ctxt('nvp')

    def n1kv_ctxt(self):
        # n1kv adds VSM credentials and user flags on top of the common
        # plugin context.
        n1kv_ctxt = self._plugin_ctxt('n1kv')
        n1kv_ctxt.update({
            'vsm_ip': config('n1kv-vsm-ip'),
            'vsm_username': config('n1kv-vsm-username'),
            'vsm_password': config('n1kv-vsm-password'),
            'restrict_policy_profiles':
                config('n1kv-restrict-policy-profiles'),
        })
        n1kv_user_config_flags = config('n1kv-config-flags')
        if n1kv_user_config_flags:
            n1kv_ctxt['user_config_flags'] = config_flags_parser(
                n1kv_user_config_flags)
        return n1kv_ctxt

    def calico_ctxt(self):
        return self._plugin_ctxt('Calico')

    def neutron_ctxt(self):
        """Base context: network manager plus the neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        return {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}

    def pg_ctxt(self):
        return self._plugin_ctxt('plumgrid')

    def midonet_ctxt(self):
        return self._plugin_ctxt('midonet')

    def __call__(self):
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Dispatch to the plugin-specific context builder.
        builders = {'ovs': self.ovs_ctxt,
                    'nvp': self.nvp_ctxt,
                    'nsx': self.nvp_ctxt,
                    'n1kv': self.n1kv_ctxt,
                    'Calico': self.calico_ctxt,
                    'vsp': self.nuage_ctxt,
                    'plumgrid': self.pg_ctxt,
                    'midonet': self.midonet_ctxt}
        builder = builders.get(self.plugin)
        if builder:
            ctxt.update(builder())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            ctxt['neutron_alchemy_flags'] = config_flags_parser(alchemy_flags)

        self._save_flag_file()
        return ctxt
-
-
class NeutronPortContext(OSContextGenerator):

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.
        """
        if not ports:
            return None

        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics():
            # Ignore virtual interfaces (bond masters will be identified from
            # their slaves)
            if not is_phy_iface(nic):
                continue

            _nic = get_bond_master(nic)
            if _nic:
                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
                    level=DEBUG)
                nic = _nic

            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            addresses = get_ipv4_addr(nic, fatal=False)
            addresses += get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses

        resolved = []
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in ports:
            if re.match(mac_regex, entry):
                # NIC is in known NICs and does NOT have an IP address
                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                    # If the nic is part of a bridge then don't use it
                    if is_bridge_member(hwaddr_to_nic[entry]):
                        continue

                    # Entry is a MAC address for a valid interface that
                    # doesn't have an IP address assigned yet.
                    resolved.append(hwaddr_to_nic[entry])
            else:
                # If the passed entry is not a MAC address, assume it's a
                # valid interface, and that the user put it there on purpose
                # (we can trust it to be the real external network).
                resolved.append(entry)

        # BUG FIX: de-duplicate while preserving first-seen order.
        # list(set(resolved)) returned entries in nondeterministic order,
        # which made e.g. ExternalPortContext's choice of ports[0] unstable
        # across hook invocations.
        seen = set()
        unique = []
        for port in resolved:
            if port not in seen:
                seen.add(port)
                unique.append(port)
        return unique
-
-
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-separated list of key=value pairs in the
    charm configuration and apply them at any point in any file by using
    a template flag.

    Sometimes users might want config flags inserted within a specific
    section, so this class lets users specify the template flag name,
    allowing for multiple template flags (sections) within the same
    context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        flags = config(self._charm_flag)
        if not flags:
            return {}
        return {self._template_flag: config_flags_parser(flags)}
-
-
class LibvirtConfigFlagsContext(OSContextGenerator):
    """
    This context provides support for extending
    the libvirt section through user-defined flags.
    """
    def __call__(self):
        flags = config('libvirt-flags')
        if not flags:
            return {}
        return {'libvirt_flags': config_flags_parser(flags)}
-
-
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                MYSECTION:
                    - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interested in. Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_configuration': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service : Service name key to query in any subordinate
                         data found
        :param config_file : Service's config file to query sections
        :param interface : Subordinate interface to inspect
        """
        self.config_file = config_file
        if isinstance(service, list):
            self.services = service
        else:
            self.services = [service]
        if isinstance(interface, list):
            self.interfaces = interface
        else:
            self.interfaces = [interface]

    def __call__(self):
        ctxt = {'sections': {}}
        rids = []
        for interface in self.interfaces:
            rids.extend(relation_ids(interface))
        for rid in rids:
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if not sub_config:
                    continue

                try:
                    sub_config = json.loads(sub_config)
                # BUG FIX: catch ValueError (json.JSONDecodeError is a
                # subclass) instead of a bare except, which also trapped
                # KeyboardInterrupt/SystemExit.
                except ValueError:
                    log('Could not parse JSON from '
                        'subordinate_configuration setting from %s'
                        % rid, level=ERROR)
                    continue

                for service in self.services:
                    if service not in sub_config:
                        log('Found subordinate_configuration on %s but it '
                            'contained nothing for %s service'
                            % (rid, service), level=INFO)
                        continue

                    # BUG FIX: use fresh local names below; the original
                    # rebound ``sub_config`` while iterating, so any second
                    # service in ``self.services`` was looked up in the
                    # already-narrowed (wrong) dictionary.
                    service_config = sub_config[service]
                    if self.config_file not in service_config:
                        log('Found subordinate_configuration on %s but it '
                            'contained nothing for %s'
                            % (rid, self.config_file), level=INFO)
                        continue

                    file_config = service_config[self.config_file]
                    for k, v in six.iteritems(file_config):
                        if k == 'sections':
                            for section, config_list in six.iteritems(v):
                                log("adding section '%s'" % (section),
                                    level=DEBUG)
                                if ctxt[k].get(section):
                                    ctxt[k][section].extend(config_list)
                                else:
                                    ctxt[k][section] = config_list
                        else:
                            ctxt[k] = v

        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
-
-
class LogLevelContext(OSContextGenerator):

    def __call__(self):
        # Unset config options default to False rather than None.
        debug = config('debug')
        verbose = config('verbose')
        return {
            'debug': False if debug is None else debug,
            'verbose': False if verbose is None else verbose,
        }
-
-
class SyslogContext(OSContextGenerator):

    def __call__(self):
        # Pass the use-syslog charm option straight through to templates.
        return {'use_syslog': config('use-syslog')}
-
-
class BindHostContext(OSContextGenerator):

    def __call__(self):
        # Bind to the IPv6 or IPv4 wildcard address as configured.
        host = '::' if config('prefer-ipv6') else '0.0.0.0'
        return {'bind_host': host}
-
-
class WorkerConfigContext(OSContextGenerator):

    @property
    def num_cpus(self):
        # NOTE: newer psutil (16.04) exposes cpu_count(); older releases
        # only provide the NUM_CPUS constant.
        if hasattr(psutil, 'cpu_count'):
            return psutil.cpu_count()
        return psutil.NUM_CPUS

    def __call__(self):
        multiplier = config('worker-multiplier') or 0
        count = int(self.num_cpus * multiplier)
        if count == 0 and multiplier > 0:
            # A positive multiplier always yields at least one worker.
            count = 1
        return {"workers": count}
-
-
class ZeroMQContext(OSContextGenerator):
    interfaces = ['zeromq-configuration']

    def __call__(self):
        ctxt = {}
        if not is_relation_made('zeromq-configuration', 'host'):
            return ctxt
        # Last related unit wins for each key.
        for rid in relation_ids('zeromq-configuration'):
            for unit in related_units(rid):
                # NOTE: relation_get called positionally (attribute, unit,
                # rid), matching the original call pattern.
                ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
                ctxt['zmq_host'] = relation_get('host', unit, rid)
                ctxt['zmq_redis_address'] = relation_get(
                    'zmq_redis_address', unit, rid)

        return ctxt
-
-
class NotificationDriverContext(OSContextGenerator):

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # Notifications are enabled iff an AMQP relation exists; the
        # values are strings as expected by the templates.
        enabled = is_relation_made(self.amqp_relation)
        return {'notifications': "True" if enabled else 'False'}
-
-
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""
    def __call__(self):
        sysctl_settings = config('sysctl')
        if sysctl_settings:
            target = '/etc/sysctl.d/50-{0}.conf'.format(charm_name())
            sysctl_create(sysctl_settings, target)
        return {'sysctl': sysctl_settings}
-
-
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key + default used when the
        # relation does not provide a value.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' doubles as the "relation data is ready"
                # marker.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        settings = {}
        for nkey, spec in self.neutron_defaults.items():
            defv = spec['default']
            rkey = spec['rel_key']
            if rkey not in rdata:
                settings[nkey] = defv
            elif isinstance(defv, bool):
                # Relation data is string-typed; coerce booleans.
                settings[nkey] = bool_from_string(rdata[rkey])
            else:
                settings[nkey] = rdata[rkey]
        return settings
-
-
class ExternalPortContext(NeutronPortContext):

    def __call__(self):
        ctxt = {}
        cfg = config('ext-port')
        if not cfg:
            return ctxt

        candidates = [p.strip() for p in cfg.split()]
        resolved = self.resolve_ports(candidates)
        if resolved:
            # Only the first resolved port is used as the external port.
            ctxt = {"ext_port": resolved[0]}
            mtu = NeutronAPIContext()().get('network_device_mtu')
            if mtu:
                ctxt['ext_port_mtu'] = mtu

        return ctxt
-
-
class DataPortContext(NeutronPortContext):
    """Maps each configured data-port NIC (or MAC) to its target bridge."""

    def __call__(self):
        cfg = config('data-port')
        if not cfg:
            return None

        # Map of {port/mac:bridge}
        portmap = parse_data_port_mappings(cfg)
        ports = portmap.keys()
        # Resolve provided ports or mac addresses and filter out those
        # already attached to a bridge.
        resolved = self.resolve_ports(ports)
        # BUG FIX: resolve_ports returns None for an empty port list; the
        # original fed that straight into a dict comprehension, raising
        # TypeError. Bail out early instead.
        if not resolved:
            return None

        # Normalise resolved NIC names back to the keys used in portmap so
        # the bridge lookup below also works for MAC-keyed entries.
        normalized = {get_nic_hwaddr(port): port for port in resolved
                      if port not in ports}
        normalized.update({port: port for port in resolved
                           if port in ports})
        return {normalized[port]: bridge for port, bridge in
                six.iteritems(portmap) if port in normalized}
-
-
class PhyNICMTUContext(DataPortContext):

    def __call__(self):
        ctxt = {}
        port_map = super(PhyNICMTUContext, self).__call__()
        if not port_map:
            return ctxt

        ports = sorted(port_map.keys())
        mtu = NeutronAPIContext()().get('network_device_mtu')
        # If any of the ports is a vlan device, its underlying device must
        # have the mtu applied first.
        underlying = set()
        for port in ports:
            for lower in glob.glob("/sys/class/net/%s/lower_*" % port):
                underlying.add(os.path.basename(lower).split('_')[1])

        devices = list(underlying)
        devices.extend(ports)
        if mtu:
            # The template expects a literal '\n' separator.
            ctxt["devs"] = '\\n'.join(devices)
            ctxt['mtu'] = mtu

        return ctxt
-
-
class NetworkServiceContext(OSContextGenerator):

    def __init__(self, rel_name='quantum-network-service'):
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        # Return the first related unit whose data forms a complete context.
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                ctxt = {key: rdata.get(key) for key in
                        ('keystone_host', 'service_port', 'auth_port',
                         'service_tenant', 'service_username',
                         'service_password', 'quantum_host',
                         'quantum_port', 'quantum_url', 'region')}
                # Protocol/version fields fall back to sane defaults.
                ctxt['service_protocol'] = (rdata.get('service_protocol') or
                                            'http')
                ctxt['auth_protocol'] = rdata.get('auth_protocol') or 'http'
                ctxt['api_version'] = rdata.get('api_version') or '2.0'
                if self.context_complete(ctxt):
                    return ctxt
        return {}
-
-
class InternalEndpointContext(OSContextGenerator):
    """Internal endpoint context.

    Provides the endpoint type used for communication between services,
    e.g. between Nova and Cinder internally. OpenStack defaults to public
    endpoints, so this lets admins opt into internal endpoints instead.
    """
    def __call__(self):
        use_internal = config('use-internal-endpoints')
        return {'use_internal_endpoints': use_internal}
-
-
class AppArmorContext(OSContextGenerator):
    """Base class for apparmor contexts."""

    def __init__(self):
        self._ctxt = None  # lazily-computed cache for ctxt
        self.aa_profile = None  # subclasses are expected to set this
        self.aa_utils_packages = ['apparmor-utils']

    @property
    def ctxt(self):
        """Lazily compute and cache the context dictionary."""
        if self._ctxt is not None:
            return self._ctxt
        self._ctxt = self._determine_ctxt()
        return self._ctxt

    def _determine_ctxt(self):
        """
        Validate aa-profile-mode settings is disable, enforce, or complain.

        :return ctxt: Dictionary of the apparmor profile or None
        """
        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
            ctxt = {'aa_profile_mode': config('aa-profile-mode'),
                    'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
        else:
            ctxt = None
        return ctxt

    def __call__(self):
        return self.ctxt

    def install_aa_utils(self):
        """
        Install packages required for apparmor configuration.
        """
        log("Installing apparmor utils.")
        ensure_packages(self.aa_utils_packages)

    def manually_disable_aa_profile(self):
        """
        Manually disable an apparmor profile.

        If aa-profile-mode is set to disabled (default) this is required as
        the template has been written but apparmor is yet unaware of the
        profile and aa-disable aa-profile fails. Without this the profile
        would kick into enforce mode on the next service restart.
        """
        profile_path = '/etc/apparmor.d'
        disable_path = '/etc/apparmor.d/disable'
        # lexists: a dangling symlink still counts as "already disabled".
        if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
            os.symlink(os.path.join(profile_path, self.aa_profile),
                       os.path.join(disable_path, self.aa_profile))

    def setup_aa_profile(self):
        """
        Setup an apparmor profile.
        The ctxt dictionary will contain the apparmor profile mode and
        the apparmor profile name.
        Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
        the apparmor profile.
        """
        self()  # ensure self.ctxt is populated
        if not self.ctxt:
            log("Not enabling apparmor Profile")
            return
        self.install_aa_utils()
        cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
        cmd.append(self.ctxt['aa_profile'])
        log("Setting up the apparmor profile for {} in {} mode."
            "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
        try:
            check_call(cmd)
        except CalledProcessError:
            # If aa-profile-mode is set to disabled (default) manual
            # disabling is required as the template has been written but
            # apparmor is yet unaware of the profile and aa-disable
            # aa-profile fails. If aa-disable learns to read profile files
            # first this can be removed.
            if self.ctxt['aa_profile_mode'] == 'disable':
                log("Manually disabling the apparmor profile for {}."
                    "".format(self.ctxt['aa_profile']))
                self.manually_disable_aa_profile()
                return
            status_set('blocked', "Apparmor profile {} failed to be set to {}."
                       "".format(self.ctxt['aa_profile'],
                                 self.ctxt['aa_profile_mode']))
            # IMPROVED: bare `raise` preserves the original traceback;
            # `raise e` re-raised with a truncated traceback on Python 2.
            raise
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py
deleted file mode 100644
index f85ae4f..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
class OSContextError(Exception):
    """Error raised while generating an OpenStack template context.

    Used principally by contrib.openstack.context.
    """
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py
deleted file mode 100644
index 9b088de..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py
deleted file mode 100644
index 1f5310b..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2014-2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2016 Canonical Ltd.
-#
-# Authors:
-# Openstack Charmers <
-#
-
-"""
-Helpers for high availability.
-"""
-
-import re
-
-from charmhelpers.core.hookenv import (
- log,
- relation_set,
- charm_name,
- config,
- status_set,
- DEBUG,
-)
-
-from charmhelpers.core.host import (
- lsb_release
-)
-
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
-)
-
-
-class DNSHAException(Exception):
- """Raised when an error occurs setting up DNS HA
- """
-
- pass
-
-
-def update_dns_ha_resource_params(resources, resource_params,
- relation_id=None,
- crm_ocf='ocf:maas:dns'):
- """ Check for os-*-hostname settings and update resource dictionaries for
- the HA relation.
-
- @param resources: Pointer to dictionary of resources.
- Usually instantiated in ha_joined().
- @param resource_params: Pointer to dictionary of resource parameters.
- Usually instantiated in ha_joined()
- @param relation_id: Relation ID of the ha relation
- @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
- DNS HA
- """
-
- # Validate the charm environment for DNS HA
- assert_charm_supports_dns_ha()
-
- settings = ['os-admin-hostname', 'os-internal-hostname',
- 'os-public-hostname', 'os-access-hostname']
-
- # Check which DNS settings are set and update dictionaries
- hostname_group = []
- for setting in settings:
- hostname = config(setting)
- if hostname is None:
- log('DNS HA: Hostname setting {} is None. Ignoring.'
- ''.format(setting),
- DEBUG)
- continue
- m = re.search('os-(.+?)-hostname', setting)
- if m:
- networkspace = m.group(1)
- else:
- msg = ('Unexpected DNS hostname setting: {}. '
- 'Cannot determine network space name'
- ''.format(setting))
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
- hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
- if hostname_key in hostname_group:
- log('DNS HA: Resource {}: {} already exists in '
- 'hostname group - skipping'.format(hostname_key, hostname),
- DEBUG)
- continue
-
- hostname_group.append(hostname_key)
- resources[hostname_key] = crm_ocf
- resource_params[hostname_key] = (
- 'params fqdn="{}" ip_address="{}" '
- ''.format(hostname, resolve_address(endpoint_type=networkspace,
- override=False)))
-
- if len(hostname_group) >= 1:
- log('DNS HA: Hostname group is set with {} as members. '
- 'Informing the ha relation'.format(' '.join(hostname_group)),
- DEBUG)
- relation_set(relation_id=relation_id, groups={
- 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
- else:
- msg = 'DNS HA: Hostname group has no members.'
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
-
-def assert_charm_supports_dns_ha():
- """Validate prerequisites for DNS HA
- The MAAS client is only available on Xenial or greater
- """
- if lsb_release().get('DISTRIB_RELEASE') < '16.04':
- msg = ('DNS HA is only supported on 16.04 and greater '
- 'versions of Ubuntu.')
- status_set('blocked', msg)
- raise DNSHAException(msg)
- return True
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 0fd3ac2..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
- network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
- resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'binding': 'public',
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'binding': 'internal',
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'binding': 'admin',
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC, override=True):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured, or a Juju 2.0 extra-binding has been used.
-
- :param endpoint_type: Network endpoing type
- :param override: Accept hostname overrides or not
- """
- resolved_address = None
- if override:
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- binding = ADDRESS_MAP[endpoint_type]['binding']
- clustered = is_clustered()
-
- if clustered and vips:
- if net_addr:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- # NOTE: endeavour to check vips against network space
- # bindings
- try:
- bound_cidr = resolve_network_cidr(
- network_get_primary_address(binding)
- )
- for vip in vips:
- if is_address_in_network(bound_cidr, vip):
- resolved_address = vip
- break
- except NotImplementedError:
- # If no net-splits configured and no support for extra
- # bindings/network spaces so we expect a single vip
- resolved_address = vips[0]
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- if net_addr:
- resolved_address = get_address_in_network(net_addr, fallback_addr)
- else:
- # NOTE: only try to use extra bindings if legacy network
- # configuration is not in use
- try:
- resolved_address = network_get_primary_address(binding)
- except NotImplementedError:
- resolved_address = fallback_addr
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 03427b4..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': ['plumgrid-lxc',
- 'iovisor-dkms'],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- },
- 'midonet': {
- 'config': '/etc/neutron/plugins/midonet/midonet.ini',
- 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package()],
- 'server_packages': ['neutron-server',
- 'python-neutron-plugin-midonet'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- if release >= 'kilo':
- plugins['midonet']['driver'] = (
- 'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if release >= 'liberty':
- plugins['midonet']['driver'] = (
- 'midonet.neutron.plugin_v1.MidonetPluginV2')
- plugins['midonet']['server_packages'].remove(
- 'python-neutron-plugin-midonet')
- plugins['midonet']['server_packages'].append(
- 'python-networking-midonet')
- plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
- plugins['plumgrid']['server_packages'].remove(
- 'neutron-plugin-plumgrid')
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of bridge:port.
-
- Returns dict of the form {port:bridge} where ports may be mac addresses or
- interface names.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index 8958895..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 9d3e3d8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,1891 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-import itertools
-import functools
-import shutil
-
-import six
-import tempfile
-import traceback
-import uuid
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
- config,
- log as juju_log,
- charm_dir,
- DEBUG,
- INFO,
- ERROR,
- related_units,
- relation_ids,
- relation_set,
- service_name,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr,
- is_ipv6,
- port_has_listener,
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- mounts,
- umount,
- service_running,
- service_pause,
- service_resume,
- restart_on_change_helper,
-)
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ('yakkety', 'newton'),
- ('zebra', 'ocata'), # TODO: upload with real Z name
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
- ('2016.1', 'mitaka'),
- ('2016.2', 'newton'),
- ('2017.1', 'ocata'),
-])
-
-# The ugly duckling - must list releases oldest to newest
-SWIFT_CODENAMES = OrderedDict([
- ('diablo',
- ['1.4.3']),
- ('essex',
- ['1.4.8']),
- ('folsom',
- ['1.7.4']),
- ('grizzly',
- ['1.7.6', '1.7.7', '1.8.0']),
- ('havana',
- ['1.9.0', '1.9.1', '1.10.0']),
- ('icehouse',
- ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
- ('juno',
- ['2.0.0', '2.1.0', '2.2.0']),
- ('kilo',
- ['2.2.1', '2.2.2']),
- ('liberty',
- ['2.3.0', '2.4.0', '2.5.0']),
- ('mitaka',
- ['2.5.0', '2.6.0', '2.7.0']),
- ('newton',
- ['2.8.0', '2.9.0']),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12', 'liberty'),
- ('13', 'mitaka'),
- ('14', 'newton'),
- ('15', 'ocata'),
- ]),
- 'neutron-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ]),
- 'cinder-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ]),
- 'keystone': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
- 'horizon-common': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ]),
- 'heat-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ]),
- 'glance-common': OrderedDict([
- ('11', 'liberty'),
- ('12', 'mitaka'),
- ('13', 'newton'),
- ('14', 'ocata'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
-}
-
-GIT_DEFAULT_REPOS = {
- 'requirements': 'git://github.com/openstack/requirements',
- 'cinder': 'git://github.com/openstack/cinder',
- 'glance': 'git://github.com/openstack/glance',
- 'horizon': 'git://github.com/openstack/horizon',
- 'keystone': 'git://github.com/openstack/keystone',
- 'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
- 'neutron': 'git://github.com/openstack/neutron',
- 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
- 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
- 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
- 'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
- 'liberty': 'stable/liberty',
- 'mitaka': 'stable/mitaka',
- 'master': 'master',
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_version_codename_swift(codename):
- '''Determine OpenStack version number of swift from codename.'''
- for k, v in six.iteritems(SWIFT_CODENAMES):
- if k == codename:
- return v[-1]
- e = 'Could not derive swift version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_swift_codename(version):
- '''Determine OpenStack codename that corresponds to swift version.'''
- codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
-
- if len(codenames) > 1:
- # If more than one release codename contains this version we determine
- # the actual codename based on the highest available install source.
- for codename in reversed(codenames):
- releases = UBUNTU_OPENSTACK_RELEASE
- release = [k for k, v in six.iteritems(releases) if codename in v]
- ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
- if codename in ret or release[0] in ret:
- return codename
- elif len(codenames) == 1:
- return codenames[0]
-
- # NOTE: fallback - attempt to match with just major.minor version
- match = re.match('^(\d+)\.(\d+)', version)
- if match:
- major_minor_version = match.group(0)
- for codename, versions in six.iteritems(SWIFT_CODENAMES):
- for release_version in versions:
- if release_version.startswith(major_minor_version):
- return codename
-
- return None
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- if 'swift' in pkg.name:
- # Fully x.y.z match for swift versions
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- else:
- # x.y match only for 20XX.X
- # and ignore patch level for other packages
- match = re.match('^(\d+)\.(\d+)', vers)
-
- if match:
- vers = match.group(0)
-
- # Generate a major version number for newer semantic
- # versions of openstack projects
- major_vers = vers.split('.')[0]
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- major_vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][major_vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- return get_swift_codename(vers)
- else:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- for cname, version in six.iteritems(vers_map):
- if cname == codename:
- return version[-1]
- else:
- vers_map = OPENSTACK_CODENAMES
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or
- get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- key = keyid.strip()
- if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
- key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
- juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
- juju_log("Importing ASCII Armor PGP key", level=DEBUG)
- with tempfile.NamedTemporaryFile() as keyfile:
- with open(keyfile.name, 'w') as fd:
- fd.write(key)
- fd.write("\n")
-
- cmd = ['apt-key', 'add', keyfile.name]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
- else:
- juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
- juju_log("Importing PGP key from keyserver", level=DEBUG)
- cmd = ['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
-
-
-def get_source_and_pgp_key(input):
- """Look for a pgp key ID or ascii-armor key in the given input."""
- index = input.strip()
- index = input.rfind('|')
- if index < 0:
- return input, None
-
- key = input[index + 1:].strip('|')
- source = input[:index]
- return source, key
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'newton': 'xenial-updates/newton',
- 'newton/updates': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- avail_vers = get_os_version_codename_swift(codename)
- else:
- avail_vers = get_os_version_install_source(src)
- apt.init()
- if "swift" in package:
- major_cur_vers = cur_vers.split('.', 1)[0]
- major_avail_vers = avail_vers.split('.', 1)[0]
- major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
- return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
- return apt.version_compare(avail_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- if config('vip'):
- vips = config('vip').split()
- for vip in vips:
- if vip and is_ipv6(vip):
- hosts.append(vip)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
- """
- Returns OpenStack codename of release being installed from source.
- """
- if git_install_requested():
- projects = _git_yaml_load(projects_yaml)
-
- if projects in GIT_DEFAULT_BRANCHES.keys():
- if projects == 'master':
- return 'newton'
- return projects
-
- if 'release' in projects:
- if projects['release'] == 'master':
- return 'newton'
- return projects['release']
-
- return None
-
-
-def git_default_repos(projects_yaml):
- """
- Returns default repos if a default openstack-origin-git value is specified.
- """
- service = service_name()
- core_project = service
-
- for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
- if projects_yaml == default:
-
- # add the requirements repo first
- repo = {
- 'name': 'requirements',
- 'repository': GIT_DEFAULT_REPOS['requirements'],
- 'branch': branch,
- }
- repos = [repo]
-
- # neutron-* and nova-* charms require some additional repos
- if service in ['neutron-api', 'neutron-gateway',
- 'neutron-openvswitch']:
- core_project = 'neutron'
- if service == 'neutron-api':
- repo = {
- 'name': 'networking-hyperv',
- 'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
- 'branch': branch,
- }
- repos.append(repo)
- for project in ['neutron-fwaas', 'neutron-lbaas',
- 'neutron-vpnaas', 'nova']:
- repo = {
- 'name': project,
- 'repository': GIT_DEFAULT_REPOS[project],
- 'branch': branch,
- }
- repos.append(repo)
-
- elif service in ['nova-cloud-controller', 'nova-compute']:
- core_project = 'nova'
- repo = {
- 'name': 'neutron',
- 'repository': GIT_DEFAULT_REPOS['neutron'],
- 'branch': branch,
- }
- repos.append(repo)
- elif service == 'openstack-dashboard':
- core_project = 'horizon'
-
- # finally add the current service's core project repo
- repo = {
- 'name': core_project,
- 'repository': GIT_DEFAULT_REPOS[core_project],
- 'branch': branch,
- }
- repos.append(repo)
-
- return yaml.dump(dict(repositories=repos, release=default))
-
- return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- constraints = None
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- depth = '1'
- if 'depth' in p.keys():
- depth = p['depth']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- constraints = os.path.join(repo_dir, "upper-constraints.txt")
- # upper-constraints didn't exist until after icehouse
- if not os.path.isfile(constraints):
- constraints = None
- # use constraints unless project yaml sets use_constraints to false
- if 'use_constraints' in projects.keys():
- if not projects['use_constraints']:
- constraints = None
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True,
- constraints=constraints)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
- _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements, constraints=None):
- """
- Clone and install a single git repository.
- """
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(
- repo, dest=parent_dir, branch=branch, depth=depth)
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv,
- constraints=constraints)
- else:
- pip_install(repo_dir, venv=venv, constraints=constraints)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def git_generate_systemd_init_files(templates_dir):
- """
- Generate systemd init files.
-
- Generates and installs systemd init units and script files based on the
- *.init.in files contained in the templates_dir directory.
-
- This code is based on the openstack-pkg-tools package and its init
- script generation, which is used by the OpenStack packages.
- """
- for f in os.listdir(templates_dir):
- # Create the init script and systemd unit file from the template
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- init_source = os.path.join(templates_dir, init_file)
- service_source = os.path.join(templates_dir, service_file)
-
- init_dest = os.path.join('/etc/init.d', init_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(init_in_source, init_source)
- with open(init_source, 'a') as outfile:
- template = '/usr/share/openstack-pkg-tools/init-script-template'
- with open(template) as infile:
- outfile.write('\n\n{}'.format(infile.read()))
-
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(init_dest):
- os.remove(init_dest)
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(init_source, init_dest)
- shutil.copyfile(service_source, service_dest)
- os.chmod(init_dest, 0o755)
-
- for f in os.listdir(templates_dir):
- # If there's a service.in file, use it instead of the generated one
- if f.endswith(".service.in"):
- service_in_file = f
- service_file = f[:-3]
-
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(service_in_source, service_source)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
- for f in os.listdir(templates_dir):
- # Generate the systemd unit if there's no existing .service.in
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_in_file = "{}.service.in".format(init_file)
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- if not os.path.exists(service_in_source):
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Set the state of the workload status for the charm.
-
- This calls _determine_os_workload_status() to get the new state, message
- and sets the status using status_set()
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _determine_os_workload_status(
- configs, required_interfaces, charm_func, services, ports)
- status_set(state, message)
-
-
def _determine_os_workload_status(
        configs, required_interfaces, charm_func=None,
        services=None, ports=None):
    """Determine the state of the workload status for the charm.

    Runs a pipeline of checks, each of which returns (None, None) when it
    has nothing to report so the next check gets a chance:

    1. pause check — if the unit should be paused, verify it actually is
       ('maintenance' when cleanly paused, 'blocked' otherwise).
    2. interface/relation completeness — 'blocked' or 'waiting' with an
       appropriate message when relations are missing or incomplete.
    3. charm-specific check (skipped while in 'maintenance') — may escalate
       the state/message.
    4. services/ports check — 'blocked' when expected services are not
       running or ports are not listened to.

    If every check passes, the unit is 'active' / "Unit is ready".

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    @returns state, message: the new workload status, user message
    """
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    if state != 'maintenance' and charm_func:
        # The charm's own check may modify the state/message.
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state, message = 'active', "Unit is ready"
        juju_log(message, 'INFO')

    return state, message
-
-
def _ows_check_if_paused(services=None, ports=None):
    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    If the unit isn't supposed to be paused, just return None, None.

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if not is_unit_paused_set():
        # Not meant to be paused: nothing to report.
        return None, None
    state, message = check_actually_paused(services=services, ports=ports)
    if state is not None:
        # Something is still running/listening — propagate the problem.
        return state, message
    # Cleanly paused.
    return ("maintenance",
            "Paused. Use 'resume' action to resume normal service.")
-
-
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Check the complete contexts to determine the workload status.

    - Checks for missing or incomplete contexts
    - juju logs details of missing required data
    - determines the correct workload status ('blocked' beats 'waiting')
    - creates an appropriate message for status_set(...)

    If there are no problems then the function returns None, None.

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic_interface: [specific_interface], }
    @returns state, message or None, None
    """
    incomplete_rel_data = incomplete_relation_data(configs,
                                                   required_interfaces)
    state = None
    message = None
    # generic interfaces with no relation at all vs. related-but-incomplete
    missing_relations = set()
    incomplete_relations = set()

    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # Find the first specific interface that has a relation established.
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        # No relation ID at all for the generic_interface -> blocked.
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            # Relation ID exists but no data was reported missing.
            if not missing_data:
                # Edge case - relation ID exists but is departing/broken:
                # treat as blocked if we are currently running a departed/
                # broken hook for this very interface.
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                # Normal case: relation ID exists but no related unit yet
                # (still joining) -> waiting.
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            # Related unit exists but data is missing on the relation
            # -> waiting for the listed keys.
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            # 'blocked' (from an earlier interface) must not be downgraded.
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)

    # Build the user-facing message; missing relations force 'blocked'.
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
- """Run a custom check function for the charm to see if it wants to
- change the state. This is only run if not in 'maintenance' and
- tests to see if the new state is more important that the previous
- one determined by the interfaces/relations check.
-
- @param state: the previously determined state so far.
- @param message: the user orientated message so far.
- @param charm_func: a callable function that returns state, message
- @returns state, message strings.
- """
- if charm_func_with_configs:
- charm_state, charm_message = charm_func_with_configs()
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- charm_message = charm_message.replace("Incomplete relations: ",
- "")
- message = "{}, {}".format(message, charm_message)
- else:
- message = charm_message
- return state, message
-
-
def _ows_check_services_running(services, ports):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    Returns (None, None) when everything checks out; otherwise
    ('blocked', message) describing what is not running/open.

    @param services: list of strings OR dictionary specifying services/ports
    @param ports: list of ports
    @returns state, message: strings or None, None
    """
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}"
                .format(", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            # e.g. "svc1: [80, 443], svc2: [8080]"
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}"
                .format(message))
            state = 'blocked'

    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
                .format(", ".join([str(p) for p, v in ports_open
                                   if not v])))
            state = 'blocked'

    # Any failure above sets state; join all collected problems into one
    # status message.
    if state is not None:
        message = "; ".join(messages)
        return state, message

    return None, None
-
-
-def _extract_services_list_helper(services):
- """Extract a OrderedDict of {service: [ports]} of the supplied services
- for use by the other functions.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param services: see above
- @returns OrderedDict(service: [ports], ...)
- """
- if services is None:
- return {}
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = OrderedDict()
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s[s['service']] = s.get('ports', [])
- if isinstance(s, str):
- _s[s] = []
- return _s
-
-
def _check_running_services(services):
    """Check whether each of the given services is actually running.

    Returns both a zipped list of (service, boolean) and a list of booleans
    in the same order as the services.

    @param services: OrderedDict of strings: [ports], one for each service to
                     check.
    @returns [(service, boolean), ...], : results for checks
             [boolean]                  : just the result of the service checks
    """
    states = [service_running(name) for name in services]
    return list(zip(services, states)), states
-
-
def _check_listening_on_services_ports(services, test=False):
    """Check that the unit is actually listening (has the port open) on the
    ports that the service specifies are open. If test is True then the
    function returns the services with ports that are open rather than
    closed.

    Returns an OrderedDict of service: ports and a list of booleans.

    @param services: OrderedDict(service: [port, ...], ...)
    @param test: default=False, if False, test for closed, otherwise open.
    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
    """
    test = bool(test)  # normalise truthy/falsy to True/False
    all_ports = list(itertools.chain(*services.values()))
    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
    # Ports whose open/closed state matches what we are looking for.
    matched_ports = [p for p, opened in zip(all_ports, ports_states)
                     if opened == test]
    map_ports = OrderedDict()
    for service, wanted_ports in services.items():
        hits = set(wanted_ports).intersection(matched_ports)
        if hits:
            map_ports[service] = hits
    return map_ports, ports_states
-
-
def _check_listening_on_ports_list(ports):
    """Check that the ports list given are being listened to.

    Returns a list of (port, listening) tuples and a list of the
    booleans, in the same order as the input.

    FIX: previously returned a lazy zip() which, on Python 3, is a
    single-use iterator despite the documented list return value; the
    result is now materialised so callers may iterate it more than once.

    @param ports: LIST of port numbers.
    @returns [(port_num, boolean), ...], [boolean]
    """
    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
    return list(zip(ports, ports_open)), ports_open
-
-
-def _filter_tuples(services_states, state):
- """Return a simple list from a list of tuples according to the condition
-
- @param services_states: LIST of (string, boolean): service and running
- state.
- @param state: Boolean to match the tuple against.
- @returns [LIST of strings] that matched the tuple RHS.
- """
- return [s for s, b in services_states if b == state]
-
-
def workload_state_compare(current_workload_state, workload_state):
    """Return the higher-priority of two workload states.

    Unrecognised (or None) states are treated as 'unknown', the lowest
    priority. Ties resolve in favour of the new state.
    """
    hierarchy = {'unknown': -1,
                 'active': 0,
                 'maintenance': 1,
                 'waiting': 2,
                 'blocked': 3,
                 }

    # Normalise anything outside the hierarchy to 'unknown'.
    if workload_state not in hierarchy:
        workload_state = 'unknown'
    if current_workload_state not in hierarchy:
        current_workload_state = 'unknown'

    # Keep the current state only when it is strictly more severe.
    if hierarchy[current_workload_state] > hierarchy[workload_state]:
        return current_workload_state
    return workload_state
-
-
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces and return a
    dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered.

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces, e.g.:
        required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    A generic interface is satisfied when any one of its specific
    interfaces has a complete context.

    Returns a dictionary of incomplete or missing required contexts with
    relation status of interfaces and any missing data points, e.g.:
        {'message':
             {'amqp': {'missing_data': ['rabbitmq_password'],
                       'related': True},
              'zeromq-configuration': {'related': False}},
         'identity':
             {'identity-service': {'related': False}},
         'database':
             {'pgsql-db': {'related': False},
              'shared-db': {'related': True}}}
    """
    complete = set(configs.complete_contexts())
    result = {}
    for svc_type, interfaces in required_interfaces.items():
        if complete.intersection(interfaces):
            # At least one specific interface is complete: satisfied.
            continue
        result[svc_type] = configs.get_incomplete_context_data(interfaces)
    return result
-
-
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                except Exception:
                    # FIX: was a bare 'except:', which would also trap
                    # SystemExit/KeyboardInterrupt; only real errors
                    # should be reported as a failed upgrade.
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
-
-
def remote_restart(rel_name, remote_service=None):
    """Publish a restart trigger on all relations of the given name.

    Sets a fresh UUID under 'restart-trigger' (and optionally
    'remote-service') so the principal charm notices a change and
    restarts.
    """
    payload = {'restart-trigger': str(uuid.uuid4())}
    if remote_service:
        payload['remote-service'] = remote_service
    # This subordinate can be related to two separate services using
    # different subordinate relations, so only issue the restart if the
    # principal is actually connected down the relation we think it is.
    for rid in relation_ids(rel_name):
        if related_units(relid=rid):
            relation_set(relation_id=rid, relation_settings=payload)
-
-
def check_actually_paused(services=None, ports=None):
    """Verify that the unit really is paused: the listed services are
    stopped and the listed ports are not being listened to.

    @param services: See _extract_services_list_helper
    @param ports: OPTIONAL list of port numbers
    @returns status, : string for status (None if okay)
             message : string for problem for status_set
    """
    state = None
    message = None
    problems = []
    if services is not None:
        services = _extract_services_list_helper(services)
        running_pairs, running_flags = _check_running_services(services)
        if any(running_flags):
            # Nothing should be running on a paused unit.
            problems.append("these services running: {}"
                            .format(", ".join(
                                _filter_tuples(running_pairs, True))))
            state = "blocked"
        open_map, open_flags = (
            _check_listening_on_services_ports(services, True))
        if any(open_flags):
            # Describe which service still has which ports open.
            parts = {svc: ", ".join(str(p) for p in open_ports)
                     for svc, open_ports in open_map.items()}
            detail = ", ".join(
                "{}: [{}]".format(svc, plist) for svc, plist in parts.items())
            problems.append(
                "these service:ports are open: {}".format(detail))
            state = 'blocked'
    if ports is not None:
        listening, flags = _check_listening_on_ports_list(ports)
        if any(flags):
            problems.append(
                "these ports which should be closed, but are open: {}"
                .format(", ".join(str(p) for p, is_open in listening
                                  if is_open)))
            state = 'blocked'
    if problems:
        message = ("Services should be paused but {}"
                   .format(", ".join(problems)))
    return state, message
-
-
def set_unit_paused():
    """Record the paused state in the local kv() store.

    This does NOT actually pause the unit's services.
    """
    with unitdata.HookData()() as hook_data:
        storage = hook_data[0]
        storage.set('unit-paused', True)
-
-
def clear_unit_paused():
    """Clear the paused flag in the local kv() store.

    This does NOT actually restart any services - it only clears the
    local state.
    """
    with unitdata.HookData()() as hook_data:
        storage = hook_data[0]
        storage.set('unit-paused', False)
-
-
def is_unit_paused_set():
    """Return the state of the kv().get('unit-paused').
    This does NOT verify that the unit really is paused.

    To help with units that don't have HookData() (testing),
    any failure to read the store returns False.
    """
    try:
        with unitdata.HookData()() as t:
            kv = t[0]
            # transform something truth-y into a Boolean.
            return bool(kv.get('unit-paused'))
    except Exception:
        # FIX: was a bare 'except:', which would also swallow
        # SystemExit/KeyboardInterrupt.
        return False
-
-
def pause_unit(assess_status_func, services=None, ports=None,
               charm_func=None):
    """Pause a unit by stopping the services and setting 'unit-paused'
    in the local kv() store.

    Also checks that the services have stopped and ports are no longer
    being listened to.

    An optional charm_func() can be called that can either raise an
    Exception or return non None, None to indicate that the unit
    didn't pause cleanly.

    The signature for charm_func is:
    charm_func() -> message: string

    charm_func() is executed after any services are stopped, if supplied.

    The services object can either be:
      - None : no services were passed (an empty dict is returned)
      - a list of strings
      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
      - An array of [{'service': service_name, ...}, ...]

    @param assess_status_func: (f() -> message: string | None) or None
    @param services: OPTIONAL see above
    @param ports: OPTIONAL list of port
    @param charm_func: function to run for custom charm pausing.
    @returns None
    @raises Exception(message) on an error for action_fail().
    """
    services = _extract_services_list_helper(services)
    messages = []
    if services:
        for service in services.keys():
            stopped = service_pause(service)
            if not stopped:
                messages.append("{} didn't stop cleanly.".format(service))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            # BUG FIX: previously 'message.append(str(e))', which appended
            # to the wrong variable (a string or unbound name) and would
            # itself raise, masking the charm_func error.
            messages.append(str(e))
    set_unit_paused()
    if assess_status_func:
        message = assess_status_func()
        if message:
            messages.append(message)
    if messages:
        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
-
-
def resume_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
    """Resume a unit by starting the services and clearing 'unit-paused'
    in the local kv() store.

    Also checks that the services have started and ports are being listened
    to.

    An optional charm_func() can be called that can either raise an
    Exception or return non None to indicate that the unit
    didn't resume cleanly.

    The signature for charm_func is:
    charm_func() -> message: string

    charm_func() is executed after any services are started, if supplied.

    The services object can either be:
      - None : no services were passed (an empty dict is returned)
      - a list of strings
      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
      - An array of [{'service': service_name, ...}, ...]

    @param assess_status_func: (f() -> message: string | None) or None
    @param services: OPTIONAL see above
    @param ports: OPTIONAL list of port
    @param charm_func: function to run for custom charm resuming.
    @returns None
    @raises Exception(message) on an error for action_fail().
    """
    services = _extract_services_list_helper(services)
    messages = []
    if services:
        for service in services.keys():
            started = service_resume(service)
            if not started:
                messages.append("{} didn't start cleanly.".format(service))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            # BUG FIX: previously 'message.append(str(e))', which appended
            # to the wrong variable (a string or unbound name) and would
            # itself raise, masking the charm_func error.
            messages.append(str(e))
    clear_unit_paused()
    if assess_status_func:
        message = assess_status_func()
        if message:
            messages.append(message)
    if messages:
        raise Exception("Couldn't resume: {}".format("; ".join(messages)))
-
-
def make_assess_status_func(*args, **kwargs):
    """Create an assess_status_func() suitable for handing to pause_unit()
    and resume_unit().

    This uses the _determine_os_workload_status(...) function to determine
    what the workload_status should be for the unit. If the unit is
    not in maintenance or active states, then the message is returned to
    the caller. This is so an action that doesn't result in either a
    complete pause or complete resume can signal failure with an
    action_fail().
    """
    def _assess_status_func():
        state, message = _determine_os_workload_status(*args, **kwargs)
        status_set(state, message)
        # Only surface the message when the unit ended up unhealthy.
        if state in ('maintenance', 'active'):
            return None
        return message

    return _assess_status_func
-
-
def pausable_restart_on_change(restart_map, stopstart=False,
                               restart_functions=None):
    """A restart_on_change decorator that checks to see if the unit is
    paused. If it is paused then the decorated function doesn't fire.

    This is provided as a helper, as the @restart_on_change(...) decorator
    is in core.host, yet the openstack specific helpers are in this file
    (contrib.openstack.utils). Thus, this needs to be an optional feature
    for openstack charms (or charms that wish to use the openstack
    pause/resume type features).

    It is used as follows:

        from contrib.openstack.utils import (
            pausable_restart_on_change as restart_on_change)

        @restart_on_change(restart_map, stopstart=<boolean>)
        def some_hook(...):
            pass

    see core.utils.restart_on_change() for more details.

    @param restart_map: the restart map {conf_file: [services]}
    @param stopstart: DEFAULT false; whether to stop, start or just restart
    @param restart_functions: optional {service: restart_fn} overrides
    @returns decorator to use a restart_on_change with pausability
    """
    def decorator(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            if is_unit_paused_set():
                # Paused: run the hook but skip the restart machinery.
                return f(*args, **kwargs)
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)), restart_map, stopstart,
                restart_functions)
        return inner
    return decorator
-
-
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

    1. A string in the simple format of key=value pairs, with the
       possibility of specifying multiple key value pairs within the same
       string. For example, a string in the format of
       'key1=value1, key2=value2' will return a dict of:

           {'key1': 'value1',
            'key2': 'value2'}.

    2. A string in the above format, but supporting a comma-delimited list
       of values for the same key. For example, a string in the format of
       'key1=value1, key2=value3,value4,value5' will return a dict of:

           {'key1': 'value1',
            'key2': 'value3,value4,value5'}

    3. A string containing a colon character (:) prior to an equal
       character (=) will be treated as yaml and parsed as such. This can
       be used to specify more complex key value pairs. For example,
       a string in the format of 'key1: subkey1=value1, subkey2=value2'
       will return a dict of:

           {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated
    values which themselves may be comma-separated list of values.

    @param config_flags: string in one of the formats above
    @returns dict of parsed flags
    @raises OSContextError: when the string is malformed (e.g. '==', or a
        non-first segment without an embedded ',key' prefix)
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        juju_log("config_flags is not in expected format (key=value)",
                 level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='. Each element of 'split' is then "value_i,key_i+1"
    # except the first (pure key) and last (pure value).
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        current = split[i]
        # NOTE: 'next' shadows the builtin of the same name; kept as-is.
        next = split[i + 1]
        # Everything after the last comma in 'next' belongs to the
        # following key; everything before is this key's value.
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                juju_log("Invalid config value(s) at index %s" % (i),
                         level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags