aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/ceilometer/tests
diff options
context:
space:
mode:
Diffstat (limited to 'charms/trusty/ceilometer/tests')
-rwxr-xr-xcharms/trusty/ceilometer/tests/014-basic-precise-icehouse11
-rwxr-xr-xcharms/trusty/ceilometer/tests/015-basic-trusty-icehouse9
-rwxr-xr-xcharms/trusty/ceilometer/tests/016-basic-trusty-juno11
-rwxr-xr-xcharms/trusty/ceilometer/tests/017-basic-trusty-kilo11
-rwxr-xr-xcharms/trusty/ceilometer/tests/018-basic-trusty-liberty11
-rwxr-xr-xcharms/trusty/ceilometer/tests/019-basic-trusty-mitaka11
-rwxr-xr-xcharms/trusty/ceilometer/tests/020-basic-wily-liberty9
-rwxr-xr-xcharms/trusty/ceilometer/tests/021-basic-xenial-mitaka9
-rw-r--r--charms/trusty/ceilometer/tests/README113
-rw-r--r--charms/trusty/ceilometer/tests/basic_deployment.py664
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py95
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py829
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py1012
-rwxr-xr-xcharms/trusty/ceilometer/tests/setup/00-setup18
-rw-r--r--charms/trusty/ceilometer/tests/tests.yaml21
21 files changed, 0 insertions, 3236 deletions
diff --git a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse b/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
deleted file mode 100755
index 0f5c590..0000000
--- a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on precise-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='precise',
- openstack='cloud:precise-icehouse',
- source='cloud:precise-updates/icehouse')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse b/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
deleted file mode 100755
index 8530390..0000000
--- a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/016-basic-trusty-juno b/charms/trusty/ceilometer/tests/016-basic-trusty-juno
deleted file mode 100755
index f1ca57d..0000000
--- a/charms/trusty/ceilometer/tests/016-basic-trusty-juno
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-juno."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-juno',
- source='cloud:trusty-updates/juno')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo b/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
deleted file mode 100755
index cc89564..0000000
--- a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-kilo."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-kilo',
- source='cloud:trusty-updates/kilo')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty b/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
deleted file mode 100755
index 18b9500..0000000
--- a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-liberty',
- source='cloud:trusty-updates/liberty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka b/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
deleted file mode 100755
index 06c0849..0000000
--- a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-mitaka',
- source='cloud:trusty-updates/mitaka')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/020-basic-wily-liberty b/charms/trusty/ceilometer/tests/020-basic-wily-liberty
deleted file mode 100755
index 7b5908f..0000000
--- a/charms/trusty/ceilometer/tests/020-basic-wily-liberty
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on wily-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='wily')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka b/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
deleted file mode 100755
index 1706607..0000000
--- a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on xenial-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='xenial')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/README b/charms/trusty/ceilometer/tests/README
deleted file mode 100644
index 79c5b06..0000000
--- a/charms/trusty/ceilometer/tests/README
+++ /dev/null
@@ -1,113 +0,0 @@
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-Reference: lp:openstack-charm-testing for full test bundles.
-
-A single topology and configuration is defined and deployed, once for each of
-the defined Ubuntu:OpenStack release combos. The ongoing goal is for this
-charm to always possess tests and combo definitions for all currently-supported
-release combinations of U:OS.
-
-test_* methods are called in lexical sort order, as with most runners. However,
-each individual test method should be idempotent and expected to pass regardless
-of run order or Ubuntu:OpenStack combo. When writing or modifying tests,
-ensure that every individual test is not dependent on another test_ method.
-
-Test naming convention, purely for code organization purposes:
- 1xx service and endpoint checks
- 2xx relation checks
- 3xx config checks
- 4xx functional checks
- 9xx restarts, config changes, actions and other final checks
-
-In order to run tests, charm-tools and juju must be installed:
- sudo add-apt-repository ppa:juju/stable
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer amulet
-
-Alternatively, tests may be exercised with proposed or development versions
-of juju and related tools:
-
- # juju proposed version
- sudo add-apt-repository ppa:juju/proposed
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
- # juju development version
- sudo add-apt-repository ppa:juju/devel
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
-Some tests may need to download files. If a web proxy server is required in
-the environment, the AMULET_HTTP_PROXY environment variable must be set and
-passed into the juju test command. This is unrelated to juju's http proxy
-settings or behavior.
-
-The following examples demonstrate different ways that tests can be executed.
-All examples are run from the charm's root directory.
-
- * To run all +x tests in the tests directory:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- make functional_test
-
- * To run the tests against a specific release combo as defined in tests/:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To run tests and keep the juju environment deployed after a failure:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To re-run a test module against an already deployed environment (one
- that was deployed by a previous call to 'juju test --set-e'):
-
- ./tests/015-basic-trusty-icehouse
-
- * Even with --set-e, `juju test` will tear down the deployment when all
- tests pass. The following work flow may be more effective when
- iterating on test writing.
-
- bzr branch lp:charms/trusty/foo
- cd foo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
-
- * There may be test definitions in the tests/ dir which are not set +x
- executable. This is generally true for deprecated releases, or for
- upcoming releases which are not yet validated and enabled. To enable
- and run these tests:
- bzr branch lp:charms/trusty/foo
- cd foo
- ls tests
- chmod +x tests/017-basic-trusty-kilo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/017-basic-trusty-kilo
-
-
-Additional notes:
-
- * Use DEBUG to turn on debug logging, use ERROR otherwise.
- u = OpenStackAmuletUtils(ERROR)
- u = OpenStackAmuletUtils(DEBUG)
-
- * To interact with the deployed environment:
- export OS_USERNAME=admin
- export OS_PASSWORD=openstack
- export OS_TENANT_NAME=admin
- export OS_REGION_NAME=RegionOne
- export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0
- keystone user-list
- glance image-list
diff --git a/charms/trusty/ceilometer/tests/basic_deployment.py b/charms/trusty/ceilometer/tests/basic_deployment.py
deleted file mode 100644
index 9769759..0000000
--- a/charms/trusty/ceilometer/tests/basic_deployment.py
+++ /dev/null
@@ -1,664 +0,0 @@
-import subprocess
-import amulet
-import json
-import time
-import ceilometerclient.v2.client as ceilo_client
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
- OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils,
- DEBUG,
- # ERROR
-)
-
-# Use DEBUG to turn on debug logging
-u = OpenStackAmuletUtils(DEBUG)
-
-
-class CeilometerBasicDeployment(OpenStackAmuletDeployment):
- """Amulet tests on a basic ceilometer deployment."""
-
- def __init__(self, series, openstack=None, source=None, stable=True):
- """Deploy the entire test environment."""
- super(CeilometerBasicDeployment, self).__init__(series, openstack,
- source, stable)
- self._add_services()
- self._add_relations()
- self._configure_services()
- self._deploy()
-
- u.log.info('Waiting on extended status checks...')
- exclude_services = ['mysql', 'mongodb']
- self._auto_wait_for_status(exclude_services=exclude_services)
-
- self._initialize_tests()
-
- def _add_services(self):
- """Add services
-
- Add the services that we're testing, where ceilometer is local,
- and the rest of the service are from lp branches that are
- compatible with the local charm (e.g. stable or next).
- """
- this_service = {'name': 'ceilometer'}
- other_services = [{'name': 'mysql'},
- {'name': 'rabbitmq-server'},
- {'name': 'keystone'},
- {'name': 'mongodb'},
- {'name': 'glance'}, # to satisfy workload status
- {'name': 'ceilometer-agent'},
- {'name': 'nova-compute'}]
- super(CeilometerBasicDeployment, self)._add_services(this_service,
- other_services)
-
- def _add_relations(self):
- """Add all of the relations for the services."""
- relations = {
- 'ceilometer:shared-db': 'mongodb:database',
- 'ceilometer:amqp': 'rabbitmq-server:amqp',
- 'ceilometer:identity-service': 'keystone:identity-service',
- 'ceilometer:identity-notifications': 'keystone:'
- 'identity-notifications',
- 'keystone:shared-db': 'mysql:shared-db',
- 'ceilometer:ceilometer-service': 'ceilometer-agent:'
- 'ceilometer-service',
- 'nova-compute:nova-ceilometer': 'ceilometer-agent:nova-ceilometer',
- 'nova-compute:shared-db': 'mysql:shared-db',
- 'nova-compute:amqp': 'rabbitmq-server:amqp',
- 'glance:identity-service': 'keystone:identity-service',
- 'glance:shared-db': 'mysql:shared-db',
- 'glance:amqp': 'rabbitmq-server:amqp',
- 'nova-compute:image-service': 'glance:image-service'
- }
- super(CeilometerBasicDeployment, self)._add_relations(relations)
-
- def _configure_services(self):
- """Configure all of the services."""
- keystone_config = {'admin-password': 'openstack',
- 'admin-token': 'ubuntutesting'}
- configs = {'keystone': keystone_config}
- super(CeilometerBasicDeployment, self)._configure_services(configs)
-
- def _get_token(self):
- return self.keystone.service_catalog.catalog['token']['id']
-
- def _initialize_tests(self):
- """Perform final initialization before tests get run."""
- # Access the sentries for inspecting service units
- self.ceil_sentry = self.d.sentry.unit['ceilometer/0']
- self.ceil_agent_sentry = self.d.sentry.unit['ceilometer-agent/0']
- self.mysql_sentry = self.d.sentry.unit['mysql/0']
- self.keystone_sentry = self.d.sentry.unit['keystone/0']
- self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
- self.mongodb_sentry = self.d.sentry.unit['mongodb/0']
- self.nova_sentry = self.d.sentry.unit['nova-compute/0']
- u.log.debug('openstack release val: {}'.format(
- self._get_openstack_release()))
- u.log.debug('openstack release str: {}'.format(
- self._get_openstack_release_string()))
-
- # Authenticate admin with keystone endpoint
- self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
- user='admin',
- password='openstack',
- tenant='admin')
-
- # Authenticate admin with ceilometer endpoint
- ep = self.keystone.service_catalog.url_for(service_type='metering',
- endpoint_type='publicURL')
- os_token = self.keystone.auth_token
- self.log.debug('Instantiating ceilometer client...')
- self.ceil = ceilo_client.Client(endpoint=ep, token=os_token)
-
- def _run_action(self, unit_id, action, *args):
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- command.extend(args)
- print("Running command: %s\n" % " ".join(command))
- output = subprocess.check_output(command)
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- action_id = data[u'Action queued with id']
- return action_id
-
- def _wait_on_action(self, action_id):
- command = ["juju", "action", "fetch", "--format=json", action_id]
- while True:
- try:
- output = subprocess.check_output(command)
- except Exception as e:
- print(e)
- return False
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- if data[u"status"] == "completed":
- return True
- elif data[u"status"] == "failed":
- return False
- time.sleep(2)
-
- def test_100_services(self):
- """Verify the expected services are running on the corresponding
- service units."""
- u.log.debug('Checking system services on units...')
-
- ceilometer_svcs = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification',
- ]
-
- if self._get_openstack_release() < self.trusty_mitaka:
- ceilometer_svcs.append('ceilometer-alarm-evaluator')
- ceilometer_svcs.append('ceilometer-alarm-notifier')
-
- service_names = {
- self.ceil_sentry: ceilometer_svcs,
- }
-
- ret = u.validate_services_by_name(service_names)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_110_service_catalog(self):
- """Verify that the service catalog endpoint data is valid."""
- u.log.debug('Checking keystone service catalog data...')
- endpoint_check = {
- 'adminURL': u.valid_url,
- 'id': u.not_null,
- 'region': 'RegionOne',
- 'publicURL': u.valid_url,
- 'internalURL': u.valid_url
- }
- expected = {
- 'metering': [endpoint_check],
- 'identity': [endpoint_check]
- }
- actual = self.keystone.service_catalog.get_endpoints()
-
- ret = u.validate_svc_catalog_endpoint_data(expected, actual)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_112_keystone_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking keystone api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- internal_port = public_port = '5000'
- admin_port = '35357'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Keystone endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_114_ceilometer_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking ceilometer api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- admin_port = internal_port = public_port = '8777'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Ceilometer endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_200_ceilometer_identity_relation(self):
- """Verify the ceilometer to keystone identity-service relation data"""
- u.log.debug('Checking ceilometer to keystone identity-service '
- 'relation data...')
- unit = self.ceil_sentry
- relation = ['identity-service', 'keystone:identity-service']
- ceil_ip = unit.relation('identity-service',
- 'keystone:identity-service')['private-address']
- ceil_endpoint = "http://%s:8777" % (ceil_ip)
-
- expected = {
- 'admin_url': ceil_endpoint,
- 'internal_url': ceil_endpoint,
- 'private-address': ceil_ip,
- 'public_url': ceil_endpoint,
- 'region': 'RegionOne',
- 'requested_roles': 'ResellerAdmin',
- 'service': 'ceilometer',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_201_keystone_ceilometer_identity_relation(self):
- """Verify the keystone to ceilometer identity-service relation data"""
- u.log.debug('Checking keystone:ceilometer identity relation data...')
- unit = self.keystone_sentry
- relation = ['identity-service', 'ceilometer:identity-service']
- id_relation = unit.relation('identity-service',
- 'ceilometer:identity-service')
- id_ip = id_relation['private-address']
- expected = {
- 'admin_token': 'ubuntutesting',
- 'auth_host': id_ip,
- 'auth_port': "35357",
- 'auth_protocol': 'http',
- 'private-address': id_ip,
- 'service_host': id_ip,
- 'service_password': u.not_null,
- 'service_port': "5000",
- 'service_protocol': 'http',
- 'service_tenant': 'services',
- 'service_tenant_id': u.not_null,
- 'service_username': 'ceilometer',
- }
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('keystone identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_202_keystone_ceilometer_identity_notes_relation(self):
- """Verify ceilometer to keystone identity-notifications relation"""
- u.log.debug('Checking keystone:ceilometer '
- 'identity-notifications relation data...')
-
- # Relation data may vary depending on timing of hooks and relations.
- # May be glance- or keystone- or another endpoint-changed value, so
- # check that at least one ???-endpoint-changed value exists.
- unit = self.keystone_sentry
- relation_data = unit.relation('identity-service',
- 'ceilometer:identity-notifications')
-
- expected = '-endpoint-changed'
- found = 0
- for key in relation_data.keys():
- if expected in key and relation_data[key]:
- found += 1
- u.log.debug('{}: {}'.format(key, relation_data[key]))
-
- if not found:
- message = ('keystone:ceilometer identity-notification relation '
- 'error\n expected something like: {}\n actual: '
- '{}'.format(expected, relation_data))
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_203_ceilometer_amqp_relation(self):
- """Verify the ceilometer to rabbitmq-server amqp relation data"""
- u.log.debug('Checking ceilometer:rabbitmq amqp relation data...')
- unit = self.ceil_sentry
- relation = ['amqp', 'rabbitmq-server:amqp']
- expected = {
- 'username': 'ceilometer',
- 'private-address': u.valid_ip,
- 'vhost': 'openstack'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_204_amqp_ceilometer_relation(self):
- """Verify the rabbitmq-server to ceilometer amqp relation data"""
- u.log.debug('Checking rabbitmq:ceilometer amqp relation data...')
- unit = self.rabbitmq_sentry
- relation = ['amqp', 'ceilometer:amqp']
- expected = {
- 'hostname': u.valid_ip,
- 'private-address': u.valid_ip,
- 'password': u.not_null,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('rabbitmq amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_205_ceilometer_to_mongodb_relation(self):
- """Verify the ceilometer to mongodb relation data"""
- u.log.debug('Checking ceilometer:mongodb relation data...')
- unit = self.ceil_sentry
- relation = ['shared-db', 'mongodb:database']
- expected = {
- 'ceilometer_database': 'ceilometer',
- 'private-address': u.valid_ip,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer shared-db', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_206_mongodb_to_ceilometer_relation(self):
- """Verify the mongodb to ceilometer relation data"""
- u.log.debug('Checking mongodb:ceilometer relation data...')
- unit = self.mongodb_sentry
- relation = ['database', 'ceilometer:shared-db']
- expected = {
- 'hostname': u.valid_ip,
- 'port': '27017',
- 'private-address': u.valid_ip,
- 'type': 'database',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('mongodb database', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_207_ceilometer_ceilometer_agent_relation(self):
- """Verify the ceilometer to ceilometer-agent relation data"""
- u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
- unit = self.ceil_sentry
- relation = ['ceilometer-service',
- 'ceilometer-agent:ceilometer-service']
- expected = {
- 'rabbitmq_user': 'ceilometer',
- 'verbose': 'False',
- 'rabbitmq_host': u.valid_ip,
- 'service_ports': "{'ceilometer_api': [8777, 8767]}",
- 'use_syslog': 'False',
- 'metering_secret': u.not_null,
- 'rabbitmq_virtual_host': 'openstack',
- 'db_port': '27017',
- 'private-address': u.valid_ip,
- 'db_name': 'ceilometer',
- 'db_host': u.valid_ip,
- 'debug': 'False',
- 'rabbitmq_password': u.not_null,
- 'port': '8767'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_208_ceilometer_agent_ceilometer_relation(self):
- """Verify the ceilometer-agent to ceilometer relation data"""
- u.log.debug('Checking ceilometer-agent:ceilometer relation data...')
- unit = self.ceil_agent_sentry
- relation = ['ceilometer-service', 'ceilometer:ceilometer-service']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_209_nova_compute_ceilometer_agent_relation(self):
- """Verify the nova-compute to ceilometer relation data"""
- u.log.debug('Checking nova-compute:ceilometer relation data...')
- unit = self.nova_sentry
- relation = ['nova-ceilometer', 'ceilometer-agent:nova-ceilometer']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_210_ceilometer_agent_nova_compute_relation(self):
- """Verify the ceilometer to nova-compute relation data"""
- u.log.debug('Checking ceilometer:nova-compute relation data...')
- unit = self.ceil_agent_sentry
- relation = ['nova-ceilometer', 'nova-compute:nova-ceilometer']
- sub = ('{"nova": {"/etc/nova/nova.conf": {"sections": {"DEFAULT": '
- '[["instance_usage_audit", "True"], '
- '["instance_usage_audit_period", "hour"], '
- '["notify_on_state_change", "vm_and_task_state"], '
- '["notification_driver", "ceilometer.compute.nova_notifier"], '
- '["notification_driver", '
- '"nova.openstack.common.notifier.rpc_notifier"]]}}}}')
- expected = {
- 'subordinate_configuration': sub,
- 'private-address': u.valid_ip
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_300_ceilometer_config(self):
- """Verify the data in the ceilometer config file."""
- u.log.debug('Checking ceilometer config file data...')
- unit = self.ceil_sentry
- ks_rel = self.keystone_sentry.relation('identity-service',
- 'ceilometer:identity-service')
- auth_uri = '%s://%s:%s/' % (ks_rel['service_protocol'],
- ks_rel['service_host'],
- ks_rel['service_port'])
- db_relation = self.mongodb_sentry.relation('database',
- 'ceilometer:shared-db')
- db_conn = 'mongodb://%s:%s/ceilometer' % (db_relation['hostname'],
- db_relation['port'])
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- },
- 'api': {
- 'port': '8767',
- },
- 'service_credentials': {
- 'os_auth_url': auth_uri + 'v2.0',
- 'os_tenant_name': 'services',
- 'os_username': 'ceilometer',
- 'os_password': ks_rel['service_password'],
- },
- 'database': {
- 'connection': db_conn,
- },
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_301_nova_config(self):
- """Verify data in the nova compute nova config file"""
- u.log.debug('Checking nova compute config file...')
- unit = self.nova_sentry
- conf = '/etc/nova/nova.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- 'my_ip': u.valid_ip,
- }
- }
-
- # NOTE(beisner): notification_driver is not checked like the
- # others, as configparser does not support duplicate config
- # options, and dicts cant have duplicate keys.
- # Ex. from conf file:
- # notification_driver = ceilometer.compute.nova_notifier
- # notification_driver = nova.openstack.common.notifier.rpc_notifier
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- # Check notification_driver existence via simple grep cmd
- lines = [('notification_driver = '
- 'ceilometer.compute.nova_notifier'),
- ('notification_driver = '
- 'nova.openstack.common.notifier.rpc_notifier')]
-
- sentry_units = [unit]
- cmds = []
- for line in lines:
- cmds.append('grep "{}" {}'.format(line, conf))
-
- ret = u.check_commands_on_units(cmds, sentry_units)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_302_nova_ceilometer_config(self):
- """Verify data in the ceilometer config file on the
- nova-compute (ceilometer-agent) unit."""
- u.log.debug('Checking nova ceilometer config file...')
- unit = self.nova_sentry
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'logdir': '/var/log/ceilometer'
- },
- 'database': {
- 'backend': 'sqlalchemy',
- 'connection': 'sqlite:////var/lib/ceilometer/$sqlite_db'
- }
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_400_api_connection(self):
- """Simple api calls to check service is up and responding"""
- u.log.debug('Checking api functionality...')
- assert(self.ceil.samples.list() == [])
- assert(self.ceil.meters.list() == [])
- u.log.debug('OK')
-
- # NOTE(beisner): need to add more functional tests
-
- def test_900_restart_on_config_change(self):
- """Verify that the specified services are restarted when the config
- is changed.
- """
- sentry = self.ceil_sentry
- juju_service = 'ceilometer'
-
- # Expected default and alternate values
- set_default = {'debug': 'False'}
- set_alternate = {'debug': 'True'}
-
- # Services which are expected to restart upon config change,
- # and corresponding config files affected by the change
- conf_file = '/etc/ceilometer/ceilometer.conf'
- services = {
- 'ceilometer-collector': conf_file,
- 'ceilometer-api': conf_file,
- 'ceilometer-agent-notification': conf_file,
- }
-
- if self._get_openstack_release() < self.trusty_mitaka:
- services['ceilometer-alarm-notifier'] = conf_file
- services['ceilometer-alarm-evaluator'] = conf_file
-
- if self._get_openstack_release() == self.trusty_liberty or \
- self._get_openstack_release() >= self.wily_liberty:
- # Liberty and later
- services['ceilometer-polling'] = conf_file
- else:
- # Juno and earlier
- services['ceilometer-agent-central'] = conf_file
-
- # Make config change, check for service restarts
- u.log.debug('Making config change on {}...'.format(juju_service))
- mtime = u.get_sentry_time(sentry)
- self.d.configure(juju_service, set_alternate)
-
- sleep_time = 40
- for s, conf_file in services.iteritems():
- u.log.debug("Checking that service restarted: {}".format(s))
- if not u.validate_service_config_changed(sentry, mtime, s,
- conf_file,
- retry_count=4,
- retry_sleep_time=20,
- sleep_time=sleep_time):
- self.d.configure(juju_service, set_default)
- msg = "service {} didn't restart after config change".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
- sleep_time = 0
-
- self.d.configure(juju_service, set_default)
- u.log.debug('OK')
-
- def test_910_pause_and_resume(self):
- """The services can be paused and resumed. """
- u.log.debug('Checking pause and resume actions...')
- unit_name = "ceilometer/0"
- unit = self.d.sentry.unit[unit_name]
- juju_service = 'ceilometer'
-
- assert u.status_get(unit)[0] == "active"
-
- action_id = self._run_action(unit_name, "pause")
- assert self._wait_on_action(action_id), "Pause action failed."
- assert u.status_get(unit)[0] == "maintenance"
-
- # trigger config-changed to ensure that services are still stopped
- u.log.debug("Making config change on ceilometer ...")
- self.d.configure(juju_service, {'debug': 'True'})
- assert u.status_get(unit)[0] == "maintenance"
- self.d.configure(juju_service, {'debug': 'False'})
- assert u.status_get(unit)[0] == "maintenance"
-
- action_id = self._run_action(unit_name, "resume")
- assert self._wait_on_action(action_id), "Resume action failed."
- assert u.status_get(unit)[0] == "active"
- u.log.debug('OK')
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
deleted file mode 100644
index d451698..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import os
-import six
-
-
-class AmuletDeployment(object):
- """Amulet deployment.
-
- This class provides generic Amulet deployment and test runner
- methods.
- """
-
- def __init__(self, series=None):
- """Initialize the deployment environment."""
- self.series = None
-
- if series:
- self.series = series
- self.d = amulet.Deployment(series=self.series)
- else:
- self.d = amulet.Deployment()
-
- def _add_services(self, this_service, other_services):
- """Add services.
-
- Add services to the deployment where this_service is the local charm
- that we're testing and other_services are the other services that
- are being used in the local amulet tests.
- """
- if this_service['name'] != os.path.basename(os.getcwd()):
- s = this_service['name']
- msg = "The charm's root directory name needs to be {}".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if 'units' not in this_service:
- this_service['units'] = 1
-
- self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'))
-
- for svc in other_services:
- if 'location' in svc:
- branch_location = svc['location']
- elif self.series:
- branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
- else:
- branch_location = None
-
- if 'units' not in svc:
- svc['units'] = 1
-
- self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'))
-
- def _add_relations(self, relations):
- """Add all of the relations for the services."""
- for k, v in six.iteritems(relations):
- self.d.relate(k, v)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _deploy(self):
- """Deploy environment and wait for all hooks to finish executing."""
- try:
- self.d.setup(timeout=900)
- self.d.sentry.wait(timeout=900)
- except amulet.helpers.TimeoutError:
- amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
- except Exception:
- raise
-
- def run_tests(self):
- """Run all of the methods that are prefixed with 'test_'."""
- for test in dir(self):
- if test.startswith('test_'):
- getattr(self, test)()
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
deleted file mode 100644
index 7e5c25a..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
+++ /dev/null
@@ -1,829 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import json
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import time
-import uuid
-
-import amulet
-import distro_info
-import six
-from six.moves import configparser
-if six.PY3:
- from urllib import parse as urlparse
-else:
- import urlparse
-
-
-class AmuletUtils(object):
- """Amulet utilities.
-
- This class provides common utility functions that are used by Amulet
- tests.
- """
-
- def __init__(self, log_level=logging.ERROR):
- self.log = self.get_logger(level=log_level)
- self.ubuntu_releases = self.get_ubuntu_releases()
-
- def get_logger(self, name="amulet-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def valid_ip(self, ip):
- if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
- return True
- else:
- return False
-
- def valid_url(self, url):
- p = re.compile(
- r'^(?:http|ftp)s?://'
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
- r'localhost|'
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
- r'(?::\d+)?'
- r'(?:/?|[/?]\S+)$',
- re.IGNORECASE)
- if p.match(url):
- return True
- else:
- return False
-
- def get_ubuntu_release_from_sentry(self, sentry_unit):
- """Get Ubuntu release codename from sentry unit.
-
- :param sentry_unit: amulet sentry/service unit pointer
- :returns: list of strings - release codename, failure message
- """
- msg = None
- cmd = 'lsb_release -cs'
- release, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} lsb_release: {}'.format(
- sentry_unit.info['unit_name'], release))
- else:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, release, code))
- if release not in self.ubuntu_releases:
- msg = ("Release ({}) not found in Ubuntu releases "
- "({})".format(release, self.ubuntu_releases))
- return release, msg
-
- def validate_services(self, commands):
- """Validate that lists of commands succeed on service units. Can be
- used to verify system services are running on the corresponding
- service units.
-
- :param commands: dict with sentry keys and arbitrary command list vals
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # /!\ DEPRECATION WARNING (beisner):
- # New and existing tests should be rewritten to use
- # validate_services_by_name() as it is aware of init systems.
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_services_by_name instead of validate_services '
- 'due to init system differences.')
-
- for k, v in six.iteritems(commands):
- for cmd in v:
- output, code = k.run(cmd)
- self.log.debug('{} `{}` returned '
- '{}'.format(k.info['unit_name'],
- cmd, code))
- if code != 0:
- return "command `{}` returned {}".format(cmd, str(code))
- return None
-
- def validate_services_by_name(self, sentry_services):
- """Validate system service status by service name, automatically
- detecting init system based on Ubuntu release codename.
-
- :param sentry_services: dict with sentry keys and svc list values
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # Point at which systemd became a thing
- systemd_switch = self.ubuntu_releases.index('vivid')
-
- for sentry_unit, services_list in six.iteritems(sentry_services):
- # Get lsb_release codename from unit
- release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
- if ret:
- return ret
-
- for service_name in services_list:
- if (self.ubuntu_releases.index(release) >= systemd_switch or
- service_name in ['rabbitmq-server', 'apache2']):
- # init is systemd (or regular sysv)
- cmd = 'sudo service {} status'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0
- elif self.ubuntu_releases.index(release) < systemd_switch:
- # init is upstart
- cmd = 'sudo status {}'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0 and "start/running" in output
-
- self.log.debug('{} `{}` returned '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code))
- if not service_running:
- return u"command `{}` returned {} {}".format(
- cmd, output, str(code))
- return None
-
- def _get_config(self, unit, filename):
- """Get a ConfigParser object for parsing a unit's config file."""
- file_contents = unit.file_contents(filename)
-
- # NOTE(beisner): by default, ConfigParser does not handle options
- # with no value, such as the flags used in the mysql my.cnf file.
- # https://bugs.python.org/issue7005
- config = configparser.ConfigParser(allow_no_value=True)
- config.readfp(io.StringIO(file_contents))
- return config
-
- def validate_config_data(self, sentry_unit, config_file, section,
- expected):
- """Validate config file data.
-
- Verify that the specified section of the config file contains
- the expected option key:value pairs.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('Validating config file data ({} in {} on {})'
- '...'.format(section, config_file,
- sentry_unit.info['unit_name']))
- config = self._get_config(sentry_unit, config_file)
-
- if section != 'DEFAULT' and not config.has_section(section):
- return "section [{}] does not exist".format(section)
-
- for k in expected.keys():
- if not config.has_option(section, k):
- return "section [{}] is missing option {}".format(section, k)
-
- actual = config.get(section, k)
- v = expected[k]
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if actual != v:
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual):
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- return None
-
- def _validate_dict_data(self, expected, actual):
- """Validate dictionary data.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('actual: {}'.format(repr(actual)))
- self.log.debug('expected: {}'.format(repr(expected)))
-
- for k, v in six.iteritems(expected):
- if k in actual:
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if v != actual[k]:
- return "{}:{}".format(k, actual[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual[k]):
- return "{}:{}".format(k, actual[k])
- else:
- return "key '{}' does not exist".format(k)
- return None
-
- def validate_relation_data(self, sentry_unit, relation, expected):
- """Validate actual relation data based on expected relation data."""
- actual = sentry_unit.relation(relation[0], relation[1])
- return self._validate_dict_data(expected, actual)
-
- def _validate_list_data(self, expected, actual):
- """Compare expected list vs actual list data."""
- for e in expected:
- if e not in actual:
- return "expected item {} not found in actual list".format(e)
- return None
-
- def not_null(self, string):
- if string is not None:
- return True
- else:
- return False
-
- def _get_file_mtime(self, sentry_unit, filename):
- """Get last modification time of file."""
- return sentry_unit.file_stat(filename)['mtime']
-
- def _get_dir_mtime(self, sentry_unit, directory):
- """Get last modification time of directory."""
- return sentry_unit.directory_stat(directory)['mtime']
-
- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
- """Get start time of a process based on the last modification time
- of the /proc/pid directory.
-
- :sentry_unit: The sentry unit to check for the service on
- :service: service name to look for in process table
- :pgrep_full: [Deprecated] Use full command line search mode with pgrep
- :returns: epoch time of service process start
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- if pgrep_full is not None:
- # /!\ DEPRECATION WARNING (beisner):
- # No longer implemented, as pidof is now used instead of pgrep.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
- 'longer implemented re: lp 1474030.')
-
- pid_list = self.get_process_id_list(sentry_unit, service)
- pid = pid_list[0]
- proc_dir = '/proc/{}'.format(pid)
- self.log.debug('Pid for {} on {}: {}'.format(
- service, sentry_unit.info['unit_name'], pid))
-
- return self._get_dir_mtime(sentry_unit, proc_dir)
-
- def service_restarted(self, sentry_unit, service, filename,
- pgrep_full=None, sleep_time=20):
- """Check if service was restarted.
-
- Compare a service's start time vs a file's last modification time
- (such as a config file for that service) to determine if the service
- has been restarted.
- """
- # /!\ DEPRECATION WARNING (beisner):
- # This method is prone to races in that no before-time is known.
- # Use validate_service_config_changed instead.
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_service_config_changed instead of '
- 'service_restarted due to known races.')
-
- time.sleep(sleep_time)
- if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
- self._get_file_mtime(sentry_unit, filename)):
- return True
- else:
- return False
-
- def service_restarted_since(self, sentry_unit, mtime, service,
- pgrep_full=None, sleep_time=20,
- retry_count=30, retry_sleep_time=10):
-        """Check if service was started after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
-            bool: True if service found and its start time is newer than mtime,
- False if service is older than mtime or if service was
- not found.
- """
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s service restarted since %s on '
- '%s' % (service, mtime, unit_name))
- time.sleep(sleep_time)
- proc_start_time = None
- tries = 0
- while tries <= retry_count and not proc_start_time:
- try:
- proc_start_time = self._get_proc_start_time(sentry_unit,
- service,
- pgrep_full)
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'OK'.format(tries, service, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, proc may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'failed\n{}'.format(tries, service,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not proc_start_time:
- self.log.warn('No proc start time found, assuming service did '
- 'not start')
- return False
- if proc_start_time >= mtime:
- self.log.debug('Proc start time is newer than provided mtime'
- '(%s >= %s) on %s (OK)' % (proc_start_time,
- mtime, unit_name))
- return True
- else:
- self.log.warn('Proc start time (%s) is older than provided mtime '
- '(%s) on %s, service did not '
- 'restart' % (proc_start_time, mtime, unit_name))
- return False
-
- def config_updated_since(self, sentry_unit, filename, mtime,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check if file was modified after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check the file mtime on
- filename (string): The file to check mtime of
- mtime (float): The epoch time to check against
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if file was modified more recently than mtime, False if
- file was modified before mtime, or if file not found.
- """
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s updated since %s on '
- '%s' % (filename, mtime, unit_name))
- time.sleep(sleep_time)
- file_mtime = None
- tries = 0
- while tries <= retry_count and not file_mtime:
- try:
- file_mtime = self._get_file_mtime(sentry_unit, filename)
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'OK'.format(tries, filename, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, file may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'failed\n{}'.format(tries, filename,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not file_mtime:
- self.log.warn('Could not determine file mtime, assuming '
- 'file does not exist')
- return False
-
- if file_mtime >= mtime:
- self.log.debug('File mtime is newer than provided mtime '
- '(%s >= %s) on %s (OK)' % (file_mtime,
- mtime, unit_name))
- return True
- else:
- self.log.warn('File mtime is older than provided mtime'
- '(%s < on %s) on %s' % (file_mtime,
- mtime, unit_name))
- return False
-
- def validate_service_config_changed(self, sentry_unit, mtime, service,
- filename, pgrep_full=None,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check service and file were updated after mtime
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- filename (string): The file to check mtime of
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep in seconds to pass to test helpers
- retry_count (int): If service is not found, how many times to retry
- retry_sleep_time (int): Time in seconds to wait between retries
-
- Typical Usage:
- u = OpenStackAmuletUtils(ERROR)
- ...
- mtime = u.get_sentry_time(self.cinder_sentry)
- self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
- if not u.validate_service_config_changed(self.cinder_sentry,
- mtime,
- 'cinder-api',
- '/etc/cinder/cinder.conf')
- amulet.raise_status(amulet.FAIL, msg='update failed')
- Returns:
-            bool: True if both service and file were updated/restarted after
- mtime, False if service is older than mtime or if service was
- not found or if filename was modified before mtime.
- """
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- service_restart = self.service_restarted_since(
- sentry_unit, mtime,
- service,
- pgrep_full=pgrep_full,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- config_update = self.config_updated_since(
- sentry_unit,
- filename,
- mtime,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- return service_restart and config_update
-
- def get_sentry_time(self, sentry_unit):
- """Return current epoch time on a sentry"""
- cmd = "date +'%s'"
- return float(sentry_unit.run(cmd)[0])
-
- def relation_error(self, name, data):
- return 'unexpected relation data in {} - {}'.format(name, data)
-
- def endpoint_error(self, name, data):
- return 'unexpected endpoint data in {} - {}'.format(name, data)
-
- def get_ubuntu_releases(self):
- """Return a list of all Ubuntu releases in order of release."""
- _d = distro_info.UbuntuDistroInfo()
- _release_list = _d.all
- return _release_list
-
- def file_to_url(self, file_rel_path):
- """Convert a relative file path to a file URL."""
- _abs_path = os.path.abspath(file_rel_path)
- return urlparse.urlparse(_abs_path, scheme='file').geturl()
-
- def check_commands_on_units(self, commands, sentry_units):
- """Check that all commands in a list exit zero on all
- sentry units in a list.
-
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- self.log.debug('Checking exit codes for {} commands on {} '
- 'sentry units...'.format(len(commands),
- len(sentry_units)))
- for sentry_unit in sentry_units:
- for cmd in commands:
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- return ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- return None
-
- def get_process_id_list(self, sentry_unit, process_name,
- expect_success=True):
- """Get a list of process ID(s) from a single sentry juju unit
- for a single process name.
-
- :param sentry_unit: Amulet sentry instance (juju unit)
- :param process_name: Process name
- :param expect_success: If False, expect the PID to be missing,
- raise if it is present.
- :returns: List of process IDs
- """
- cmd = 'pidof -x {}'.format(process_name)
- if not expect_success:
- cmd += " || exit 0 && exit 1"
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output).split()
-
- def get_unit_process_ids(self, unit_processes, expect_success=True):
- """Construct a dict containing unit sentries, process names, and
- process IDs.
-
- :param unit_processes: A dictionary of Amulet sentry instance
- to list of process names.
- :param expect_success: if False expect the processes to not be
- running, raise if they are.
- :returns: Dictionary of Amulet sentry instance to dictionary
- of process names to PIDs.
- """
- pid_dict = {}
- for sentry_unit, process_list in six.iteritems(unit_processes):
- pid_dict[sentry_unit] = {}
- for process in process_list:
- pids = self.get_process_id_list(
- sentry_unit, process, expect_success=expect_success)
- pid_dict[sentry_unit].update({process: pids})
- return pid_dict
-
- def validate_unit_process_ids(self, expected, actual):
- """Validate process id quantities for services on units."""
- self.log.debug('Checking units for running processes...')
- self.log.debug('Expected PIDs: {}'.format(expected))
- self.log.debug('Actual PIDs: {}'.format(actual))
-
- if len(actual) != len(expected):
- return ('Unit count mismatch. expected, actual: {}, '
- '{} '.format(len(expected), len(actual)))
-
- for (e_sentry, e_proc_names) in six.iteritems(expected):
- e_sentry_name = e_sentry.info['unit_name']
- if e_sentry in actual.keys():
- a_proc_names = actual[e_sentry]
- else:
- return ('Expected sentry ({}) not found in actual dict data.'
- '{}'.format(e_sentry_name, e_sentry))
-
- if len(e_proc_names.keys()) != len(a_proc_names.keys()):
- return ('Process name count mismatch. expected, actual: {}, '
- '{}'.format(len(expected), len(actual)))
-
- for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
- zip(e_proc_names.items(), a_proc_names.items()):
- if e_proc_name != a_proc_name:
- return ('Process name mismatch. expected, actual: {}, '
- '{}'.format(e_proc_name, a_proc_name))
-
- a_pids_length = len(a_pids)
- fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
- '{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids_length,
- a_pids))
-
- # If expected is a list, ensure at least one PID quantity match
- if isinstance(e_pids, list) and \
- a_pids_length not in e_pids:
- return fail_msg
- # If expected is not bool and not list,
- # ensure PID quantities match
- elif not isinstance(e_pids, bool) and \
- not isinstance(e_pids, list) and \
- a_pids_length != e_pids:
- return fail_msg
- # If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is True and a_pids_length < 1:
- return fail_msg
- # If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is False and a_pids_length != 0:
- return fail_msg
- else:
- self.log.debug('PID check OK: {} {} {}: '
- '{}'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids))
- return None
-
- def validate_list_of_identical_dicts(self, list_of_dicts):
- """Check that all dicts within a list are identical."""
- hashes = []
- for _dict in list_of_dicts:
- hashes.append(hash(frozenset(_dict.items())))
-
- self.log.debug('Hashes: {}'.format(hashes))
- if len(set(hashes)) == 1:
- self.log.debug('Dicts within list are identical')
- else:
- return 'Dicts within list are not identical'
-
- return None
-
- def validate_sectionless_conf(self, file_contents, expected):
- """A crude conf parser. Useful to inspect configuration files which
- do not have section headers (as would be necessary in order to use
- the configparser). Such as openstack-dashboard or rabbitmq confs."""
- for line in file_contents.split('\n'):
- if '=' in line:
- args = line.split('=')
- if len(args) <= 1:
- continue
- key = args[0].strip()
- value = args[1].strip()
- if key in expected.keys():
- if expected[key] != value:
- msg = ('Config mismatch. Expected, actual: {}, '
- '{}'.format(expected[key], value))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def get_unit_hostnames(self, units):
- """Return a dict of juju unit names to hostnames."""
- host_names = {}
- for unit in units:
- host_names[unit.info['unit_name']] = \
- str(unit.file_contents('/etc/hostname').strip())
- self.log.debug('Unit host names: {}'.format(host_names))
- return host_names
-
- def run_cmd_unit(self, sentry_unit, cmd):
- """Run a command on a unit, return the output and exit code."""
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` command returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- msg = ('{} `{}` command returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output), code
-
- def file_exists_on_unit(self, sentry_unit, file_name):
- """Check if a file exists on a unit."""
- try:
- sentry_unit.file_stat(file_name)
- return True
- except IOError:
- return False
- except Exception as e:
- msg = 'Error checking file {}: {}'.format(file_name, e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def file_contents_safe(self, sentry_unit, file_name,
- max_wait=60, fatal=False):
- """Get file contents from a sentry unit. Wrap amulet file_contents
- with retry logic to address races where a file checks as existing,
- but no longer exists by the time file_contents is called.
- Return None if file not found. Optionally raise if fatal is True."""
- unit_name = sentry_unit.info['unit_name']
- file_contents = False
- tries = 0
- while not file_contents and tries < (max_wait / 4):
- try:
- file_contents = sentry_unit.file_contents(file_name)
- except IOError:
- self.log.debug('Attempt {} to open file {} from {} '
- 'failed'.format(tries, file_name,
- unit_name))
- time.sleep(4)
- tries += 1
-
- if file_contents:
- return file_contents
- elif not fatal:
- return None
- elif fatal:
- msg = 'Failed to get file contents from unit.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def port_knock_tcp(self, host="localhost", port=22, timeout=15):
-        """Open a TCP socket to check for a listening service on a host.
-
- :param host: host name or IP address, default to localhost
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :returns: True if successful, False if connect failed
- """
-
- # Resolve host name if possible
- try:
- connect_host = socket.gethostbyname(host)
- host_human = "{} ({})".format(connect_host, host)
- except socket.error as e:
- self.log.warn('Unable to resolve address: '
- '{} ({}) Trying anyway!'.format(host, e))
- connect_host = host
- host_human = connect_host
-
- # Attempt socket connection
- try:
- knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- knock.settimeout(timeout)
- knock.connect((connect_host, port))
- knock.close()
- self.log.debug('Socket connect OK for host '
- '{} on port {}.'.format(host_human, port))
- return True
- except socket.error as e:
- self.log.debug('Socket connect FAIL for'
- ' {} port {} ({})'.format(host_human, port, e))
- return False
-
- def port_knock_units(self, sentry_units, port=22,
- timeout=15, expect_success=True):
-        """Open a TCP socket to check for a listening service on each
- listed juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :expect_success: True by default, set False to invert logic
- :returns: None if successful, Failure message otherwise
- """
- for unit in sentry_units:
- host = unit.info['public-address']
- connected = self.port_knock_tcp(host, port, timeout)
- if not connected and expect_success:
- return 'Socket connect failed.'
- elif connected and not expect_success:
- return 'Socket connected unexpectedly.'
-
- def get_uuid_epoch_stamp(self):
- """Returns a stamp string based on uuid4 and epoch time. Useful in
- generating test messages which need to be unique-ish."""
- return '[{}-{}]'.format(uuid.uuid4(), time.time())
-
-# amulet juju action helpers:
- def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output,
- params=None):
- """Run the named action on a given unit sentry.
-
- params a dict of parameters to use
- _check_output parameter is used for dependency injection.
-
- @return action_id.
- """
- unit_id = unit_sentry.info["unit_name"]
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- if params is not None:
- for key, value in params.iteritems():
- command.append("{}={}".format(key, value))
- self.log.info("Running command: %s\n" % " ".join(command))
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- action_id = data[u'Action queued with id']
- return action_id
-
- def wait_on_action(self, action_id, _check_output=subprocess.check_output):
- """Wait for a given action, returning if it completed or not.
-
- _check_output parameter is used for dependency injection.
- """
- command = ["juju", "action", "fetch", "--format=json", "--wait=0",
- action_id]
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- return data.get(u"status") == "completed"
-
- def status_get(self, unit):
- """Return the current service status of this unit."""
- raw_status, return_code = unit.run(
- "status-get --format=json --include-data")
- if return_code != 0:
- return ("unknown", "")
- status = json.loads(raw_status)
- return (status["status"], status["message"])
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index d21c9c7..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef3bdcc..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/tests/setup/00-setup b/charms/trusty/ceilometer/tests/setup/00-setup
deleted file mode 100755
index 658eb60..0000000
--- a/charms/trusty/ceilometer/tests/setup/00-setup
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo add-apt-repository --yes ppa:juju/stable
-sudo apt-get update --yes
-sudo apt-get install --yes amulet \
- distro-info-data \
- python-ceilometerclient \
- python-cinderclient \
- python-distro-info \
- python-glanceclient \
- python-heatclient \
- python-keystoneclient \
- python-neutronclient \
- python-novaclient \
- python-pika \
- python-swiftclient
diff --git a/charms/trusty/ceilometer/tests/tests.yaml b/charms/trusty/ceilometer/tests/tests.yaml
deleted file mode 100644
index 4d17631..0000000
--- a/charms/trusty/ceilometer/tests/tests.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-bootstrap: true
-reset: false
-virtualenv: true
-makefile:
- - lint
- - test
-sources:
- - ppa:juju/stable
-packages:
- - amulet
- - distro-info-data
- - python-ceilometerclient
- - python-cinderclient
- - python-distro-info
- - python-glanceclient
- - python-heatclient
- - python-keystoneclient
- - python-neutronclient
- - python-novaclient
- - python-pika
- - python-swiftclient