aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/ceilometer
diff options
context:
space:
mode:
Diffstat (limited to 'charms/trusty/ceilometer')
-rw-r--r--charms/trusty/ceilometer/.bzr/README3
-rw-r--r--charms/trusty/ceilometer/.bzr/branch-format1
-rw-r--r--charms/trusty/ceilometer/.bzr/branch/format1
-rw-r--r--charms/trusty/ceilometer/.bzr/branch/location1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/dirstatebin70502 -> 0 bytes
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/format1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/views0
-rw-r--r--charms/trusty/ceilometer/.coveragerc6
-rw-r--r--charms/trusty/ceilometer/.gitignore7
-rw-r--r--charms/trusty/ceilometer/.gitreview5
-rw-r--r--charms/trusty/ceilometer/.project17
-rw-r--r--charms/trusty/ceilometer/.pydevproject9
-rw-r--r--charms/trusty/ceilometer/.testr.conf8
-rw-r--r--charms/trusty/ceilometer/Makefile31
-rw-r--r--charms/trusty/ceilometer/README.md64
-rw-r--r--charms/trusty/ceilometer/actions.yaml6
-rwxr-xr-xcharms/trusty/ceilometer/actions/actions.py48
l---------charms/trusty/ceilometer/actions/ceilometer_contexts.py1
l---------charms/trusty/ceilometer/actions/ceilometer_hooks.py1
l---------charms/trusty/ceilometer/actions/ceilometer_utils.py1
l---------charms/trusty/ceilometer/actions/charmhelpers1
l---------charms/trusty/ceilometer/actions/openstack-upgrade1
-rwxr-xr-xcharms/trusty/ceilometer/actions/openstack_upgrade.py37
l---------charms/trusty/ceilometer/actions/pause1
l---------charms/trusty/ceilometer/actions/resume1
-rw-r--r--charms/trusty/ceilometer/charm-helpers-hooks.yaml15
-rw-r--r--charms/trusty/ceilometer/charm-helpers-tests.yaml5
-rw-r--r--charms/trusty/ceilometer/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/__init__.py191
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/benchmark.py36
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/commands.py32
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/hookenv.py23
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/host.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/unitdata.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py398
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py82
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py316
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py63
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py552
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml13
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema9
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml49
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py84
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py50
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py55
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py52
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py134
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py45
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py131
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py211
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf7
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs349
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules117
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally214
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py89
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py394
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config70
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config159
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py157
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py499
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py1012
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py1583
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh34
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py179
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py384
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf21
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg66
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy10
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py1576
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py269
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py145
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py1206
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py88
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/hookenv.py1009
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/host.py714
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/hugepage.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/helpers.py292
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/templating.py81
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/__init__.py464
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py68
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/giturl.py70
-rw-r--r--charms/trusty/ceilometer/charmhelpers/payload/__init__.py17
-rw-r--r--charms/trusty/ceilometer/charmhelpers/payload/execd.py66
-rw-r--r--charms/trusty/ceilometer/config.yaml194
-rw-r--r--charms/trusty/ceilometer/copyright32
-rw-r--r--charms/trusty/ceilometer/hardening.yaml5
l---------charms/trusty/ceilometer/hooks/amqp-relation-changed1
l---------charms/trusty/ceilometer/hooks/amqp-relation-departed1
l---------charms/trusty/ceilometer/hooks/amqp-relation-joined1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed1
l---------charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined1
l---------charms/trusty/ceilometer/hooks/ceilometer_contexts.py1
-rwxr-xr-xcharms/trusty/ceilometer/hooks/ceilometer_hooks.py366
l---------charms/trusty/ceilometer/hooks/ceilometer_utils.py1
l---------charms/trusty/ceilometer/hooks/charmhelpers1
l---------charms/trusty/ceilometer/hooks/cluster-relation-changed1
l---------charms/trusty/ceilometer/hooks/cluster-relation-departed1
l---------charms/trusty/ceilometer/hooks/cluster-relation-joined1
l---------charms/trusty/ceilometer/hooks/config-changed1
l---------charms/trusty/ceilometer/hooks/ha-relation-changed1
l---------charms/trusty/ceilometer/hooks/ha-relation-joined1
l---------charms/trusty/ceilometer/hooks/identity-notifications-relation-changed1
l---------charms/trusty/ceilometer/hooks/identity-service-relation-changed1
l---------charms/trusty/ceilometer/hooks/identity-service-relation-joined1
-rwxr-xr-xcharms/trusty/ceilometer/hooks/install20
l---------charms/trusty/ceilometer/hooks/install.real1
l---------charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed1
l---------charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-changed1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-departed1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-joined1
l---------charms/trusty/ceilometer/hooks/start1
l---------charms/trusty/ceilometer/hooks/stop1
l---------charms/trusty/ceilometer/hooks/update-status1
l---------charms/trusty/ceilometer/hooks/upgrade-charm1
-rw-r--r--charms/trusty/ceilometer/icon.svg717
-rw-r--r--charms/trusty/ceilometer/lib/ceilometer_contexts.py122
-rw-r--r--charms/trusty/ceilometer/lib/ceilometer_utils.py391
-rw-r--r--charms/trusty/ceilometer/metadata.yaml42
-rwxr-xr-xcharms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central345
-rw-r--r--charms/trusty/ceilometer/requirements.txt11
-rw-r--r--charms/trusty/ceilometer/revision1
-rw-r--r--charms/trusty/ceilometer/setup.cfg6
-rw-r--r--charms/trusty/ceilometer/templates/icehouse/ceilometer.conf42
-rw-r--r--charms/trusty/ceilometer/templates/kilo/ceilometer.conf43
-rw-r--r--charms/trusty/ceilometer/templates/mitaka/ceilometer.conf42
-rw-r--r--charms/trusty/ceilometer/templates/parts/rabbitmq21
-rw-r--r--charms/trusty/ceilometer/test-requirements.txt9
-rwxr-xr-xcharms/trusty/ceilometer/tests/014-basic-precise-icehouse11
-rwxr-xr-xcharms/trusty/ceilometer/tests/015-basic-trusty-icehouse9
-rwxr-xr-xcharms/trusty/ceilometer/tests/016-basic-trusty-juno11
-rwxr-xr-xcharms/trusty/ceilometer/tests/017-basic-trusty-kilo11
-rwxr-xr-xcharms/trusty/ceilometer/tests/018-basic-trusty-liberty11
-rwxr-xr-xcharms/trusty/ceilometer/tests/019-basic-trusty-mitaka11
-rwxr-xr-xcharms/trusty/ceilometer/tests/020-basic-wily-liberty9
-rwxr-xr-xcharms/trusty/ceilometer/tests/021-basic-xenial-mitaka9
-rw-r--r--charms/trusty/ceilometer/tests/README113
-rw-r--r--charms/trusty/ceilometer/tests/basic_deployment.py664
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py95
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py829
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py1012
-rwxr-xr-xcharms/trusty/ceilometer/tests/setup/00-setup18
-rw-r--r--charms/trusty/ceilometer/tests/tests.yaml21
-rw-r--r--charms/trusty/ceilometer/tox.ini29
-rw-r--r--charms/trusty/ceilometer/unit_tests/__init__.py3
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_actions.py64
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py67
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py164
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py370
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py180
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_utils.py111
229 files changed, 0 insertions, 24742 deletions
diff --git a/charms/trusty/ceilometer/.bzr/README b/charms/trusty/ceilometer/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/ceilometer/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/ceilometer/.bzr/branch-format b/charms/trusty/ceilometer/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/ceilometer/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/ceilometer/.bzr/branch/format b/charms/trusty/ceilometer/.bzr/branch/format
deleted file mode 100644
index b391ffd..0000000
--- a/charms/trusty/ceilometer/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG Branch Reference Format 1
diff --git a/charms/trusty/ceilometer/.bzr/branch/location b/charms/trusty/ceilometer/.bzr/branch/location
deleted file mode 100644
index a5d57cb..0000000
--- a/charms/trusty/ceilometer/.bzr/branch/location
+++ /dev/null
@@ -1 +0,0 @@
-bzr+ssh://bazaar.launchpad.net/~sdn-charmers/charms/trusty/ceilometer/ceilometer-plugin/ \ No newline at end of file
diff --git a/charms/trusty/ceilometer/.bzr/checkout/conflicts b/charms/trusty/ceilometer/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/ceilometer/.bzr/checkout/dirstate b/charms/trusty/ceilometer/.bzr/checkout/dirstate
deleted file mode 100644
index 852f177..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/ceilometer/.bzr/checkout/format b/charms/trusty/ceilometer/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/ceilometer/.bzr/checkout/views b/charms/trusty/ceilometer/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/ceilometer/.coveragerc b/charms/trusty/ceilometer/.coveragerc
deleted file mode 100644
index ed08ec9..0000000
--- a/charms/trusty/ceilometer/.coveragerc
+++ /dev/null
@@ -1,6 +0,0 @@
-[report]
-# Regexes for lines to exclude from consideration
-exclude_lines =
- if __name__ == .__main__.:
-include=
- hooks/ceilometer_*
diff --git a/charms/trusty/ceilometer/.gitignore b/charms/trusty/ceilometer/.gitignore
deleted file mode 100644
index 25d8aec..0000000
--- a/charms/trusty/ceilometer/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-bin
-.coverage
-.testrepository
-.tox
-tags
-*.sw[nop]
-*.pyc
diff --git a/charms/trusty/ceilometer/.gitreview b/charms/trusty/ceilometer/.gitreview
deleted file mode 100644
index 7ff19b3..0000000
--- a/charms/trusty/ceilometer/.gitreview
+++ /dev/null
@@ -1,5 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/charm-ceilometer.git
-defaultbranch=stable/16.04
diff --git a/charms/trusty/ceilometer/.project b/charms/trusty/ceilometer/.project
deleted file mode 100644
index 9e30b38..0000000
--- a/charms/trusty/ceilometer/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>ceilometer</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.python.pydev.PyDevBuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/charms/trusty/ceilometer/.pydevproject b/charms/trusty/ceilometer/.pydevproject
deleted file mode 100644
index a338b81..0000000
--- a/charms/trusty/ceilometer/.pydevproject
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/ceilometer/hooks</path>
-<path>/ceilometer/unit_tests</path>
-</pydev_pathproperty>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-</pydev_project>
diff --git a/charms/trusty/ceilometer/.testr.conf b/charms/trusty/ceilometer/.testr.conf
deleted file mode 100644
index 801646b..0000000
--- a/charms/trusty/ceilometer/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
- OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
- ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
-
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/charms/trusty/ceilometer/Makefile b/charms/trusty/ceilometer/Makefile
deleted file mode 100644
index 64d2c34..0000000
--- a/charms/trusty/ceilometer/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-clean:
- rm -rf .coverage .tox .testrepository trusty .unit-state.db
- find . -iname '*.pyc' -delete
-
-lint:
- @tox -e pep8
-
-test:
- @# Bundletester expects unit tests here.
- tox -e py27
-
-functional_test:
- @echo Starting Amulet tests...
- @tests/setup/00-setup
- @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
-
-publish: lint test
- bzr push lp:charms/ceilometer
- bzr push lp:charms/trusty/ceilometer
diff --git a/charms/trusty/ceilometer/README.md b/charms/trusty/ceilometer/README.md
deleted file mode 100644
index 6c69440..0000000
--- a/charms/trusty/ceilometer/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-Overview
---------
-
-This charm provides the Ceilometer service for OpenStack. It is intended to
-be used alongside the other OpenStack components, starting with the Folsom
-release.
-
-Ceilometer is made up of 2 separate services: an API service, and a collector
-service. This charm allows them to be deployed in different combination,
-depending on user preference and requirements.
-
-This charm was developed to support deploying Folsom on both Ubuntu Quantal
-and Ubuntu Precise. Since Ceilometer is only available for Ubuntu 12.04 via
-the Ubuntu Cloud Archive, deploying this charm to a Precise machine will by
-default install Ceilometer and its dependencies from the Cloud Archive.
-
-Usage
------
-
-In order to deploy Ceilometer service, the MongoDB service is required:
-
- juju deploy mongodb
- juju deploy ceilometer
- juju add-relation ceilometer mongodb
-
-then Keystone and Rabbit relationships need to be established:
-
- juju add-relation ceilometer rabbitmq
- juju add-relation ceilometer keystone:identity-service
- juju add-relation ceilometer keystone:identity-notifications
-
-In order to capture the calculations, a Ceilometer compute agent needs to be
-installed in each nova node, and be related with Ceilometer service:
-
- juju deploy ceilometer-agent
- juju add-relation ceilometer-agent nova-compute
- juju add-relation ceilometer:ceilometer-service ceilometer-agent:ceilometer-service
-
-Ceilometer provides an API service that can be used to retrieve
-Openstack metrics.
-
-Network Space support
----------------------
-
-This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
-
-API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints.
-
-To use this feature, use the --bind option when deploying the charm:
-
- juju deploy ceilometer --bind "public=public-space internal=internal-space admin=admin-space"
-
-alternatively these can also be provided as part of a juju native bundle configuration:
-
- ceilometer:
- charm: cs:xenial/ceilometer
- bindings:
- public: public-space
- admin: admin-space
- internal: internal-space
-
-NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
-
-NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
diff --git a/charms/trusty/ceilometer/actions.yaml b/charms/trusty/ceilometer/actions.yaml
deleted file mode 100644
index dea9d08..0000000
--- a/charms/trusty/ceilometer/actions.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pause:
- description: Pause the Ceilometer unit. This action will stop Ceilometer services.
-resume:
- descrpition: Resume the Ceilometer unit. This action will start Ceilometer services.
-openstack-upgrade:
- description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
diff --git a/charms/trusty/ceilometer/actions/actions.py b/charms/trusty/ceilometer/actions/actions.py
deleted file mode 100755
index 0c077b6..0000000
--- a/charms/trusty/ceilometer/actions/actions.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-
-from charmhelpers.core.hookenv import action_fail
-from ceilometer_utils import (
- pause_unit_helper,
- resume_unit_helper,
- register_configs,
-)
-
-
-def pause(args):
- """Pause the Ceilometer services.
-
- @raises Exception should the service fail to stop.
- """
- pause_unit_helper(register_configs())
-
-
-def resume(args):
- """Resume the Ceilometer services.
-
- @raises Exception should the service fail to start."""
- resume_unit_helper(register_configs())
-
-
-# A dictionary of all the defined actions to callables (which take
-# parsed arguments).
-ACTIONS = {"pause": pause, "resume": resume}
-
-
-def main(args):
- action_name = os.path.basename(args[0])
- try:
- action = ACTIONS[action_name]
- except KeyError:
- return "Action %s undefined" % action_name
- else:
- try:
- action(args)
- except Exception as e:
- action_fail(str(e))
-
-
-if __name__ == "__main__":
- sys.exit(main(sys.argv))
diff --git a/charms/trusty/ceilometer/actions/ceilometer_contexts.py b/charms/trusty/ceilometer/actions/ceilometer_contexts.py
deleted file mode 120000
index 6c03421..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_contexts.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_contexts.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/ceilometer_hooks.py b/charms/trusty/ceilometer/actions/ceilometer_hooks.py
deleted file mode 120000
index b55df6c..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_hooks.py
+++ /dev/null
@@ -1 +0,0 @@
-../hooks/ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/ceilometer_utils.py b/charms/trusty/ceilometer/actions/ceilometer_utils.py
deleted file mode 120000
index e333253..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_utils.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_utils.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/charmhelpers b/charms/trusty/ceilometer/actions/charmhelpers
deleted file mode 120000
index 702de73..0000000
--- a/charms/trusty/ceilometer/actions/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/openstack-upgrade b/charms/trusty/ceilometer/actions/openstack-upgrade
deleted file mode 120000
index 6179301..0000000
--- a/charms/trusty/ceilometer/actions/openstack-upgrade
+++ /dev/null
@@ -1 +0,0 @@
-openstack_upgrade.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/openstack_upgrade.py b/charms/trusty/ceilometer/actions/openstack_upgrade.py
deleted file mode 100755
index cb04493..0000000
--- a/charms/trusty/ceilometer/actions/openstack_upgrade.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/python
-import sys
-
-sys.path.append('hooks/')
-
-from charmhelpers.contrib.openstack.utils import (
- do_action_openstack_upgrade,
-)
-
-from ceilometer_hooks import (
- config_changed,
- CONFIGS,
-)
-
-from ceilometer_utils import (
- do_openstack_upgrade,
-)
-
-
-def openstack_upgrade():
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed."""
-
- if (do_action_openstack_upgrade('ceilometer-common',
- do_openstack_upgrade,
- CONFIGS)):
- config_changed()
-
-if __name__ == '__main__':
- openstack_upgrade()
diff --git a/charms/trusty/ceilometer/actions/pause b/charms/trusty/ceilometer/actions/pause
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer/actions/pause
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/resume b/charms/trusty/ceilometer/actions/resume
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer/actions/resume
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charm-helpers-hooks.yaml b/charms/trusty/ceilometer/charm-helpers-hooks.yaml
deleted file mode 100644
index b17947b..0000000
--- a/charms/trusty/ceilometer/charm-helpers-hooks.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: hooks/charmhelpers
-include:
- - core
- - cli
- - fetch
- - contrib.openstack|inc=*
- - contrib.hahelpers
- - contrib.storage.linux
- - contrib.network.ip
- - contrib.python.packages
- - contrib.charmsupport
- - contrib.peerstorage
- - payload.execd
- - contrib.hardening|inc=* \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charm-helpers-tests.yaml b/charms/trusty/ceilometer/charm-helpers-tests.yaml
deleted file mode 100644
index c469f13..0000000
--- a/charms/trusty/ceilometer/charm-helpers-tests.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: tests/charmhelpers
-include:
- - contrib.amulet
- - contrib.openstack.amulet
diff --git a/charms/trusty/ceilometer/charmhelpers/__init__.py b/charms/trusty/ceilometer/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/__init__.py b/charms/trusty/ceilometer/charmhelpers/cli/__init__.py
deleted file mode 100644
index 2d37ab3..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/__init__.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import inspect
-import argparse
-import sys
-
-from six.moves import zip
-
-import charmhelpers.core.unitdata
-
-
-class OutputFormatter(object):
- def __init__(self, outfile=sys.stdout):
- self.formats = (
- "raw",
- "json",
- "py",
- "yaml",
- "csv",
- "tab",
- )
- self.outfile = outfile
-
- def add_arguments(self, argument_parser):
- formatgroup = argument_parser.add_mutually_exclusive_group()
- choices = self.supported_formats
- formatgroup.add_argument("--format", metavar='FMT',
- help="Select output format for returned data, "
- "where FMT is one of: {}".format(choices),
- choices=choices, default='raw')
- for fmt in self.formats:
- fmtfunc = getattr(self, fmt)
- formatgroup.add_argument("-{}".format(fmt[0]),
- "--{}".format(fmt), action='store_const',
- const=fmt, dest='format',
- help=fmtfunc.__doc__)
-
- @property
- def supported_formats(self):
- return self.formats
-
- def raw(self, output):
- """Output data as raw string (default)"""
- if isinstance(output, (list, tuple)):
- output = '\n'.join(map(str, output))
- self.outfile.write(str(output))
-
- def py(self, output):
- """Output data as a nicely-formatted python data structure"""
- import pprint
- pprint.pprint(output, stream=self.outfile)
-
- def json(self, output):
- """Output data in JSON format"""
- import json
- json.dump(output, self.outfile)
-
- def yaml(self, output):
- """Output data in YAML format"""
- import yaml
- yaml.safe_dump(output, self.outfile)
-
- def csv(self, output):
- """Output data as excel-compatible CSV"""
- import csv
- csvwriter = csv.writer(self.outfile)
- csvwriter.writerows(output)
-
- def tab(self, output):
- """Output data in excel-compatible tab-delimited format"""
- import csv
- csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
- csvwriter.writerows(output)
-
- def format_output(self, output, fmt='raw'):
- fmtfunc = getattr(self, fmt)
- fmtfunc(output)
-
-
-class CommandLine(object):
- argument_parser = None
- subparsers = None
- formatter = None
- exit_code = 0
-
- def __init__(self):
- if not self.argument_parser:
- self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
- if not self.formatter:
- self.formatter = OutputFormatter()
- self.formatter.add_arguments(self.argument_parser)
- if not self.subparsers:
- self.subparsers = self.argument_parser.add_subparsers(help='Commands')
-
- def subcommand(self, command_name=None):
- """
- Decorate a function as a subcommand. Use its arguments as the
- command-line arguments"""
- def wrapper(decorated):
- cmd_name = command_name or decorated.__name__
- subparser = self.subparsers.add_parser(cmd_name,
- description=decorated.__doc__)
- for args, kwargs in describe_arguments(decorated):
- subparser.add_argument(*args, **kwargs)
- subparser.set_defaults(func=decorated)
- return decorated
- return wrapper
-
- def test_command(self, decorated):
- """
- Subcommand is a boolean test function, so bool return values should be
- converted to a 0/1 exit code.
- """
- decorated._cli_test_command = True
- return decorated
-
- def no_output(self, decorated):
- """
- Subcommand is not expected to return a value, so don't print a spurious None.
- """
- decorated._cli_no_output = True
- return decorated
-
- def subcommand_builder(self, command_name, description=None):
- """
- Decorate a function that builds a subcommand. Builders should accept a
- single argument (the subparser instance) and return the function to be
- run as the command."""
- def wrapper(decorated):
- subparser = self.subparsers.add_parser(command_name)
- func = decorated(subparser)
- subparser.set_defaults(func=func)
- subparser.description = description or func.__doc__
- return wrapper
-
- def run(self):
- "Run cli, processing arguments and executing subcommands."
- arguments = self.argument_parser.parse_args()
- argspec = inspect.getargspec(arguments.func)
- vargs = []
- for arg in argspec.args:
- vargs.append(getattr(arguments, arg))
- if argspec.varargs:
- vargs.extend(getattr(arguments, argspec.varargs))
- output = arguments.func(*vargs)
- if getattr(arguments.func, '_cli_test_command', False):
- self.exit_code = 0 if output else 1
- output = ''
- if getattr(arguments.func, '_cli_no_output', False):
- output = ''
- self.formatter.format_output(output, arguments.format)
- if charmhelpers.core.unitdata._KV:
- charmhelpers.core.unitdata._KV.flush()
-
-
-cmdline = CommandLine()
-
-
-def describe_arguments(func):
- """
- Analyze a function's signature and return a data structure suitable for
- passing in as arguments to an argparse parser's add_argument() method."""
-
- argspec = inspect.getargspec(func)
- # we should probably raise an exception somewhere if func includes **kwargs
- if argspec.defaults:
- positional_args = argspec.args[:-len(argspec.defaults)]
- keyword_names = argspec.args[-len(argspec.defaults):]
- for arg, default in zip(keyword_names, argspec.defaults):
- yield ('--{}'.format(arg),), {'default': default}
- else:
- positional_args = argspec.args
-
- for arg in positional_args:
- yield (arg,), {}
- if argspec.varargs:
- yield (argspec.varargs,), {'nargs': '*'}
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py b/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py
deleted file mode 100644
index b23c16c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.contrib.benchmark import Benchmark
-
-
-@cmdline.subcommand(command_name='benchmark-start')
-def start():
- Benchmark.start()
-
-
-@cmdline.subcommand(command_name='benchmark-finish')
-def finish():
- Benchmark.finish()
-
-
-@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
-def service(subparser):
- subparser.add_argument("value", help="The composite score.")
- subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
- subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
- return Benchmark.set_composite_score
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/commands.py b/charms/trusty/ceilometer/charmhelpers/cli/commands.py
deleted file mode 100644
index 7e91db0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/commands.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module loads sub-modules into the python runtime so they can be
-discovered via the inspect module. In order to prevent flake8 from (rightfully)
-telling us these are unused modules, throw a ' # noqa' at the end of each import
-so that the warning is suppressed.
-"""
-
-from . import CommandLine # noqa
-
-"""
-Import the sub-modules which have decorated subcommands to register with chlp.
-"""
-from . import host # noqa
-from . import benchmark # noqa
-from . import unitdata # noqa
-from . import hookenv # noqa
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py b/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py
deleted file mode 100644
index 265c816..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import hookenv
-
-
-cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
-cmdline.subcommand('service-name')(hookenv.service_name)
-cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/host.py b/charms/trusty/ceilometer/charmhelpers/cli/host.py
deleted file mode 100644
index 58e78d6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/host.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import host
-
-
-@cmdline.subcommand()
-def mounts():
- "List mounts"
- return host.mounts()
-
-
-@cmdline.subcommand_builder('service', description="Control system services")
-def service(subparser):
- subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
- subparser.add_argument("service_name", help="Name of the service to control")
- return host.service
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py b/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py
deleted file mode 100644
index d1cd95b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import unitdata
-
-
-@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
-def unitdata_cmd(subparser):
- nested = subparser.add_subparsers()
- get_cmd = nested.add_parser('get', help='Retrieve data')
- get_cmd.add_argument('key', help='Key to retrieve the value of')
- get_cmd.set_defaults(action='get', value=None)
- set_cmd = nested.add_parser('set', help='Store data')
- set_cmd.add_argument('key', help='Key to set')
- set_cmd.add_argument('value', help='Value to store')
- set_cmd.set_defaults(action='set')
-
- def _unitdata_cmd(action, key, value):
- if action == 'get':
- return unitdata.kv().get(key)
- elif action == 'set':
- unitdata.kv().set(key, value)
- unitdata.kv().flush()
- return ''
- return _unitdata_cmd
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 2f24642..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Compatibility with the nrpe-external-master charm"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import subprocess
-import pwd
-import grp
-import os
-import glob
-import shutil
-import re
-import shlex
-import yaml
-
-from charmhelpers.core.hookenv import (
- config,
- local_unit,
- log,
- relation_ids,
- relation_set,
- relations_of_type,
-)
-
-from charmhelpers.core.host import service
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-# provides:
-# (...)
-# nrpe-external-master:
-# interface: nrpe-external-master
-# scope: container
-#
-# and/or
-#
-# provides:
-# (...)
-# local-monitors:
-# interface: local-monitors
-# scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-# nagios_context:
-# default: "juju"
-# type: string
-# description: |
-# Used by the nrpe subordinate charms.
-# A string that will be prepended to instance name to set the host name
-# in nagios. So for instance the hostname would be something like:
-# juju-myservice-0
-# If you're running multiple environments with the same services in them
-# this allows you to differentiate between them.
-# nagios_servicegroups:
-# default: ""
-# type: string
-# description: |
-# A comma-separated list of nagios servicegroups.
-# If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE()
-# nrpe_compat.add_check(
-# shortname = "myservice",
-# description = "Check MyService",
-# check_cmd = "check_http -w 2 -c 10 http://localhost"
-# )
-# nrpe_compat.add_check(
-# "myservice_other",
-# "Check for widget failures",
-# check_cmd = "/srv/myapp/scripts/widget_check"
-# )
-# nrpe_compat.write()
-#
-# def config_changed():
-# (...)
-# update_nrpe_config()
-#
-# def nrpe_external_master_relation_changed():
-# update_nrpe_config()
-#
-# def local_monitors_relation_changed():
-# update_nrpe_config()
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-# ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
- pass
-
-
-class Check(object):
- shortname_re = '[A-Za-z0-9-_]+$'
- service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
- use active-service
- host_name {nagios_hostname}
- service_description {nagios_hostname}[{shortname}] """
- """{description}
- check_command check_nrpe!{command}
- servicegroups {nagios_servicegroup}
-}}
-""")
-
- def __init__(self, shortname, description, check_cmd):
- super(Check, self).__init__()
- # XXX: could be better to calculate this from the service name
- if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
- self.shortname = shortname
- self.command = "check_{}".format(shortname)
- # Note: a set of invalid characters is defined by the
- # Nagios server config
- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
- self.description = description
- self.check_cmd = self._locate_cmd(check_cmd)
-
- def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
- def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
-
- def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
- parts = shlex.split(check_cmd)
- for path in search_path:
- if os.path.exists(os.path.join(path, parts[0])):
- command = os.path.join(path, parts[0])
- if len(parts) > 1:
- command += " " + " ".join(parts[1:])
- return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
-
- def _remove_service_files(self):
- if not os.path.exists(NRPE.nagios_exportdir):
- return
- for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
- os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
- def remove(self, hostname):
- nrpe_check_file = self._get_check_filename()
- if os.path.exists(nrpe_check_file):
- os.remove(nrpe_check_file)
- self._remove_service_files()
-
- def write(self, nagios_context, hostname, nagios_servicegroups):
- nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
- nrpe_check_config.write("# check {}\n".format(self.shortname))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
-
- if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
- else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
-
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
- self._remove_service_files()
-
- templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
- }
- nrpe_service_text = Check.service_template.format(**templ_vars)
- nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
- nrpe_service_config.write(str(nrpe_service_text))
-
- def run(self):
- subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
-
- def __init__(self, hostname=None):
- super(NRPE, self).__init__()
- self.config = config()
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
- else:
- self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
- if hostname:
- self.hostname = hostname
- else:
- nagios_hostname = get_nagios_hostname()
- if nagios_hostname:
- self.hostname = nagios_hostname
- else:
- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
- self.checks = []
-
- def add_check(self, *args, **kwargs):
- self.checks.append(Check(*args, **kwargs))
-
- def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
-
- # Use sensible defaults if they're not specified - these are not
- # actually used during removal, but they're required for constructing
- # the Check object; check_disk is chosen because it's part of the
- # nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
-
- check = Check(*args, **kwargs)
- check.remove(self.hostname)
-
- def write(self):
- try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
- log("Nagios user not set up, nrpe checks not updated")
- return
-
- if not os.path.exists(NRPE.nagios_logdir):
- os.mkdir(NRPE.nagios_logdir)
- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
- nrpe_monitors = {}
- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
- for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
-
- service('restart', 'nagios-nrpe-server')
-
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
- for rid in monitor_ids:
- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_host_context
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_hostname
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
- """
- Return the nagios unit name prepended with host_context if needed
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- host_context = get_nagios_hostcontext(relation_name)
- if host_context:
- unit = "%s:%s" % (host_context, local_unit())
- else:
- unit = local_unit()
- return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param list services: List of services to check
- :param str unit_name: Unit name to use in check description
- """
- for svc in services:
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
- if os.path.exists(upstart_init):
- # Don't add a check for these services from neutron-gateway
- if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
- )
- elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- cron_file = ('*/5 * * * * root '
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-s /etc/init.d/%s status > '
- '/var/lib/nagios/service-check-%s.txt\n' % (svc,
- svc)
- )
- f = open(cronpath, 'w')
- f.write(cron_file)
- f.close()
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_status_file.py -f '
- '/var/lib/nagios/service-check-%s.txt' % svc,
- )
-
-
-def copy_nrpe_checks():
- """
- Copy the nrpe checks into place
-
- """
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
- 'charmhelpers', 'contrib', 'openstack',
- 'files')
-
- if not os.path.exists(NAGIOS_PLUGINS):
- os.makedirs(NAGIOS_PLUGINS)
- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
- if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param str unit_name: Unit name to use in check description
- """
- nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
- nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 320961b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
- volume-ephemeral:
- type: boolean
- default: true
- description: >
- If false, a volume is mounted as sepecified in "volume-map"
- If true, ephemeral storage will be used, meaning that log data
- will only exist as long as the machine. YOU HAVE BEEN WARNED.
- volume-map:
- type: string
- default: {}
- description: >
- YAML map of units to device names, e.g:
- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
- Service units will raise a configure-error if volume-ephemeral
- is 'true' and no volume-map value is set. Use 'juju set' to set a
- value and 'juju resolved' to complete configuration.
-
-Usage::
-
- from charmsupport.volumes import configure_volume, VolumeConfigurationError
- from charmsupport.hookenv import log, ERROR
- def post_mount_hook():
- stop_service('myservice')
- def post_mount_hook():
- start_service('myservice')
-
- if __name__ == '__main__':
- try:
- configure_volume(before_change=pre_mount_hook,
- after_change=post_mount_hook)
- except VolumeConfigurationError:
- log('Storage could not be configured', ERROR)
-
-'''
-
-# XXX: Known limitations
-# - fstab is neither consulted nor updated
-
-import os
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-import yaml
-
-
-MOUNT_BASE = '/srv/juju/volumes'
-
-
-class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
- pass
-
-
-def get_config():
- '''Gather and sanity-check volume configuration data'''
- volume_config = {}
- config = hookenv.config()
-
- errors = False
-
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
- else:
- volume_config['ephemeral'] = False
-
- try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
- except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
- errors = True
- if volume_map is None:
- # probably an empty string
- volume_map = {}
- elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
- errors = True
-
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
- # asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
- errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
- # asked for permanent storage but did not define volume ID
- hookenv.log('Ephemeral storage was requested, but there is no volume '
- 'defined for this unit.', hookenv.ERROR)
- errors = True
-
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
-
- if errors:
- return None
- return volume_config
-
-
-def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
- raise VolumeConfigurationError()
- else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
- unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def managed_mounts():
- '''List of all mounted managed volumes'''
- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
-
-
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
- Returns the mount point or "ephemeral". before_change and after_change
- are optional functions to be called if the volume configuration changes.
- '''
-
- config = get_config()
- if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
- raise VolumeConfigurationError()
-
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
- before_change()
- unmount_volume(config)
- after_change()
- return 'ephemeral'
- else:
- # persistent storage
- if os.path.ismount(config['mountpoint']):
- mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
- before_change()
- unmount_volume(config)
- mount_volume(config)
- after_change()
- else:
- before_change()
- mount_volume(config)
- after_change()
- return config['mountpoint']
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
deleted file mode 100644
index 0091719..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import subprocess
-
-from charmhelpers.core.hookenv import (
- config as config_get,
- relation_get,
- relation_ids,
- related_units as relation_list,
- log,
- INFO,
-)
-
-
-def get_cert(cn=None):
- # TODO: deal with multiple https endpoints via charm config
- cert = config_get('ssl_cert')
- key = config_get('ssl_key')
- if not (cert and key):
- log("Inspecting identity-service relations for SSL certificate.",
- level=INFO)
- cert = key = None
- if cn:
- ssl_cert_attr = 'ssl_cert_{}'.format(cn)
- ssl_key_attr = 'ssl_key_{}'.format(cn)
- else:
- ssl_cert_attr = 'ssl_cert'
- ssl_key_attr = 'ssl_key'
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if not cert:
- cert = relation_get(ssl_cert_attr,
- rid=r_id, unit=unit)
- if not key:
- key = relation_get(ssl_key_attr,
- rid=r_id, unit=unit)
- return (cert, key)
-
-
-def get_ca_cert():
- ca_cert = config_get('ssl_ca')
- if ca_cert is None:
- log("Inspecting identity-service relations for CA SSL certificate.",
- level=INFO)
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if ca_cert is None:
- ca_cert = relation_get('ca_cert',
- rid=r_id, unit=unit)
- return ca_cert
-
-
-def install_ca_cert(ca_cert):
- if ca_cert:
- with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
- 'w') as crt:
- crt.write(ca_cert)
- subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
deleted file mode 100644
index aa0b515..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-"""
-Helpers for clustering and determining "cluster leadership" and other
-clustering-related helpers.
-"""
-
-import subprocess
-import os
-
-from socket import gethostname as get_unit_hostname
-
-import six
-
-from charmhelpers.core.hookenv import (
- log,
- relation_ids,
- related_units as relation_list,
- relation_get,
- config as config_get,
- INFO,
- ERROR,
- WARNING,
- unit_get,
- is_leader as juju_is_leader
-)
-from charmhelpers.core.decorators import (
- retry_on_exception,
-)
-from charmhelpers.core.strutils import (
- bool_from_string,
-)
-
-DC_RESOURCE_NAME = 'DC'
-
-
-class HAIncompleteConfig(Exception):
- pass
-
-
-class CRMResourceNotFound(Exception):
- pass
-
-
-class CRMDCNotFound(Exception):
- pass
-
-
-def is_elected_leader(resource):
- """
- Returns True if the charm executing this is the elected cluster leader.
-
- It relies on two mechanisms to determine leadership:
- 1. If juju is sufficiently new and leadership election is supported,
- the is_leader command will be used.
- 2. If the charm is part of a corosync cluster, call corosync to
- determine leadership.
- 3. If the charm is not part of a corosync cluster, the leader is
- determined as being "the alive unit with the lowest unit numer". In
- other words, the oldest surviving unit.
- """
- try:
- return juju_is_leader()
- except NotImplementedError:
- log('Juju leadership election feature not enabled'
- ', using fallback support',
- level=WARNING)
-
- if is_clustered():
- if not is_crm_leader(resource):
- log('Deferring action to CRM leader.', level=INFO)
- return False
- else:
- peers = peer_units()
- if peers and not oldest_peer(peers):
- log('Deferring action to oldest service unit.', level=INFO)
- return False
- return True
-
-
-def is_clustered():
- for r_id in (relation_ids('ha') or []):
- for unit in (relation_list(r_id) or []):
- clustered = relation_get('clustered',
- rid=r_id,
- unit=unit)
- if clustered:
- return True
- return False
-
-
-def is_crm_dc():
- """
- Determine leadership by querying the pacemaker Designated Controller
- """
- cmd = ['crm', 'status']
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError as ex:
- raise CRMDCNotFound(str(ex))
-
- current_dc = ''
- for line in status.split('\n'):
- if line.startswith('Current DC'):
- # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
- current_dc = line.split(':')[1].split()[0]
- if current_dc == get_unit_hostname():
- return True
- elif current_dc == 'NONE':
- raise CRMDCNotFound('Current DC: NONE')
-
- return False
-
-
-@retry_on_exception(5, base_delay=2,
- exc_type=(CRMResourceNotFound, CRMDCNotFound))
-def is_crm_leader(resource, retry=False):
- """
- Returns True if the charm calling this is the elected corosync leader,
- as returned by calling the external "crm" command.
-
- We allow this operation to be retried to avoid the possibility of getting a
- false negative. See LP #1396246 for more info.
- """
- if resource == DC_RESOURCE_NAME:
- return is_crm_dc()
- cmd = ['crm', 'resource', 'show', resource]
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError:
- status = None
-
- if status and get_unit_hostname() in status:
- return True
-
- if status and "resource %s is NOT running" % (resource) in status:
- raise CRMResourceNotFound("CRM resource %s not found" % (resource))
-
- return False
-
-
-def is_leader(resource):
- log("is_leader is deprecated. Please consider using is_crm_leader "
- "instead.", level=WARNING)
- return is_crm_leader(resource)
-
-
-def peer_units(peer_relation="cluster"):
- peers = []
- for r_id in (relation_ids(peer_relation) or []):
- for unit in (relation_list(r_id) or []):
- peers.append(unit)
- return peers
-
-
-def peer_ips(peer_relation='cluster', addr_key='private-address'):
- '''Return a dict of peers and their private-address'''
- peers = {}
- for r_id in relation_ids(peer_relation):
- for unit in relation_list(r_id):
- peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
- return peers
-
-
-def oldest_peer(peers):
- """Determines who the oldest peer is by comparing unit numbers."""
- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
- for peer in peers:
- remote_unit_no = int(peer.split('/')[1])
- if remote_unit_no < local_unit_no:
- return False
- return True
-
-
-def eligible_leader(resource):
- log("eligible_leader is deprecated. Please consider using "
- "is_elected_leader instead.", level=WARNING)
- return is_elected_leader(resource)
-
-
-def https():
- '''
- Determines whether enough data has been provided in configuration
- or relation data to configure HTTPS
- .
- returns: boolean
- '''
- use_https = config_get('use-https')
- if use_https and bool_from_string(use_https):
- return True
- if config_get('ssl_cert') and config_get('ssl_key'):
- return True
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
- rel_state = [
- relation_get('https_keystone', rid=r_id, unit=unit),
- relation_get('ca_cert', rid=r_id, unit=unit),
- ]
- # NOTE: works around (LP: #1203241)
- if (None not in rel_state) and ('' not in rel_state):
- return True
- return False
-
-
-def determine_api_port(public_port, singlenode_mode=False):
- '''
- Determine correct API server listening port based on
- existence of HTTPS reverse proxy and/or haproxy.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the API service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- if https():
- i += 1
- return public_port - (i * 10)
-
-
-def determine_apache_port(public_port, singlenode_mode=False):
- '''
- Description: Determine correct apache listening port based on public IP +
- state of the cluster.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the HAProxy service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- return public_port - (i * 10)
-
-
-def get_hacluster_config(exclude_keys=None):
- '''
- Obtains all relevant configuration from charm configuration required
- for initiating a relation to hacluster:
-
- ha-bindiface, ha-mcastport, vip
-
- param: exclude_keys: list of setting key(s) to be excluded.
- returns: dict: A dict containing settings keyed by setting name.
- raises: HAIncompleteConfig if settings are missing.
- '''
- settings = ['ha-bindiface', 'ha-mcastport', 'vip']
- conf = {}
- for setting in settings:
- if exclude_keys and setting in exclude_keys:
- continue
-
- conf[setting] = config_get(setting)
- missing = []
- [missing.append(s) for s, v in six.iteritems(conf) if v is None]
- if missing:
- log('Insufficient config data to configure hacluster.', level=ERROR)
- raise HAIncompleteConfig
- return conf
-
-
-def canonical_url(configs, vip_setting='vip'):
- '''
- Returns the correct HTTP URL to this host given the state of HTTPS
- configuration and hacluster.
-
- :configs : OSTemplateRenderer: A config tempating object to inspect for
- a complete https context.
-
- :vip_setting: str: Setting in charm config that specifies
- VIP address.
- '''
- scheme = 'http'
- if 'https' in configs.complete_contexts():
- scheme = 'https'
- if is_clustered():
- addr = config_get(vip_setting)
- else:
- addr = unit_get('private-address')
- return '%s://%s' % (scheme, addr)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
deleted file mode 100644
index 91280c0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Juju charm-helpers hardening library
-
-## Description
-
-This library provides multiple implementations of system and application
-hardening that conform to the standards of http://hardening.io/.
-
-Current implementations include:
-
- * OS
- * SSH
- * MySQL
- * Apache
-
-## Requirements
-
-* Juju Charms
-
-## Usage
-
-1. Synchronise this library into your charm and add the harden() decorator
- (from contrib.hardening.harden) to any functions or methods you want to use
- to trigger hardening of your application/system.
-
-2. Add a config option called 'harden' to your charm config.yaml and set it to
- a space-delimited list of hardening modules you want to run e.g. "os ssh"
-
-3. Override any config defaults (contrib.hardening.defaults) by adding a file
- called hardening.yaml to your charm root containing the name(s) of the
- modules whose settings you want override at root level and then any settings
- with overrides e.g.
-
- os:
- general:
- desktop_enable: True
-
-4. Now just run your charm as usual and hardening will be applied each time the
- hook runs.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
deleted file mode 100644
index a133532..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
deleted file mode 100644
index d130479..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.apache.checks import config
-
-
-def run_apache_checks():
- log("Starting Apache hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("Apache hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
deleted file mode 100644
index 8249ca0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import subprocess
-
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- DirectoryPermissionAudit,
- NoReadWriteForOther,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
-from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get Apache hardening config audits.
-
- :returns: dictionary of audits
- """
- if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
- log("Apache server does not appear to be installed on this node - "
- "skipping apache hardening", level=INFO)
- return []
-
- context = ApacheConfContext()
- settings = utils.get_settings('apache')
- audits = [
- FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
- group='root', mode=0o0640),
-
- TemplatedFile(os.path.join(settings['common']['apache_dir'],
- 'mods-available/alias.conf'),
- context,
- TEMPLATES_DIR,
- mode=0o0755,
- user='root',
- service_actions=[{'service': 'apache2',
- 'actions': ['restart']}]),
-
- TemplatedFile(os.path.join(settings['common']['apache_dir'],
- 'conf-enabled/hardening.conf'),
- context,
- TEMPLATES_DIR,
- mode=0o0640,
- user='root',
- service_actions=[{'service': 'apache2',
- 'actions': ['restart']}]),
-
- DirectoryPermissionAudit(settings['common']['apache_dir'],
- user='root',
- group='root',
- mode=0o640),
-
- DisabledModuleAudit(settings['hardening']['modules_to_disable']),
-
- NoReadWriteForOther(settings['common']['apache_dir']),
- ]
-
- return audits
-
-
-class ApacheConfContext(object):
- """Defines the set of key/value pairs to set in a apache config file.
-
- This context, when called, will return a dictionary containing the
- key/value pairs of setting to specify in the
- /etc/apache/conf-enabled/hardening.conf file.
- """
- def __call__(self):
- settings = utils.get_settings('apache')
- ctxt = settings['hardening']
-
- out = subprocess.check_output(['apache2', '-v'])
- ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
- out).group(1)
- ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
- ctxt['traceenable'] = settings['hardening']['traceenable']
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
deleted file mode 100644
index e46a58a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
+++ /dev/null
@@ -1,31 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-<IfModule alias_module>
- #
- # Aliases: Add here as many aliases as you need (with no limit). The format is
- # Alias fakename realname
- #
- # Note that if you include a trailing / on fakename then the server will
- # require it to be present in the URL. So "/icons" isn't aliased in this
- # example, only "/icons/". If the fakename is slash-terminated, then the
- # realname must also be slash terminated, and if the fakename omits the
- # trailing slash, the realname must also omit it.
- #
- # We include the /icons/ alias for FancyIndexed directory listings. If
- # you do not use FancyIndexing, you may comment this out.
- #
- Alias /icons/ "{{ apache_icondir }}/"
-
- <Directory "{{ apache_icondir }}">
- Options -Indexes -MultiViews -FollowSymLinks
- AllowOverride None
-{% if apache_version == '2.4' -%}
- Require all granted
-{% else -%}
- Order allow,deny
- Allow from all
-{% endif %}
- </Directory>
-</IfModule>
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
deleted file mode 100644
index 0794541..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-
-<Location / >
- <LimitExcept {{ allowed_http_methods }} >
- # http://httpd.apache.org/docs/2.4/upgrading.html
- {% if apache_version > '2.2' -%}
- Require all granted
- {% else -%}
- Order Allow,Deny
- Deny from all
- {% endif %}
- </LimitExcept>
-</Location>
-
-TraceEnable {{ traceenable }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
deleted file mode 100644
index 6a7057b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-
-class BaseAudit(object): # NO-QA
- """Base class for hardening checks.
-
- The lifecycle of a hardening check is to first check to see if the system
- is in compliance for the specified check. If it is not in compliance, the
- check method will return a value which will be supplied to the.
- """
- def __init__(self, *args, **kwargs):
- self.unless = kwargs.get('unless', None)
- super(BaseAudit, self).__init__()
-
- def ensure_compliance(self):
- """Checks to see if the current hardening check is in compliance or
- not.
-
- If the check that is performed is not in compliance, then an exception
- should be raised.
- """
- pass
-
- def _take_action(self):
- """Determines whether to perform the action or not.
-
- Checks whether or not an action should be taken. This is determined by
- the truthy value for the unless parameter. If unless is a callback
- method, it will be invoked with no parameters in order to determine
- whether or not the action should be taken. Otherwise, the truthy value
- of the unless attribute will determine if the action should be
- performed.
- """
- # Do the action if there isn't an unless override.
- if self.unless is None:
- return True
-
- # Invoke the callback if there is one.
- if hasattr(self.unless, '__call__'):
- results = self.unless()
- if results:
- return False
- else:
- return True
-
- if self.unless:
- return False
- else:
- return True
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
deleted file mode 100644
index cf3c987..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import subprocess
-
-from six import string_types
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
- ERROR,
-)
-
-from charmhelpers.contrib.hardening.audits import BaseAudit
-
-
-class DisabledModuleAudit(BaseAudit):
- """Audits Apache2 modules.
-
- Determines if the apache2 modules are enabled. If the modules are enabled
- then they are removed in the ensure_compliance.
- """
- def __init__(self, modules):
- if modules is None:
- self.modules = []
- elif isinstance(modules, string_types):
- self.modules = [modules]
- else:
- self.modules = modules
-
- def ensure_compliance(self):
- """Ensures that the modules are not loaded."""
- if not self.modules:
- return
-
- try:
- loaded_modules = self._get_loaded_modules()
- non_compliant_modules = []
- for module in self.modules:
- if module in loaded_modules:
- log("Module '%s' is enabled but should not be." %
- (module), level=INFO)
- non_compliant_modules.append(module)
-
- if len(non_compliant_modules) == 0:
- return
-
- for module in non_compliant_modules:
- self._disable_module(module)
- self._restart_apache()
- except subprocess.CalledProcessError as e:
- log('Error occurred auditing apache module compliance. '
- 'This may have been already reported. '
- 'Output is: %s' % e.output, level=ERROR)
-
- @staticmethod
- def _get_loaded_modules():
- """Returns the modules which are enabled in Apache."""
- output = subprocess.check_output(['apache2ctl', '-M'])
- modules = []
- for line in output.strip().split():
- # Each line of the enabled module output looks like:
- # module_name (static|shared)
- # Plus a header line at the top of the output which is stripped
- # out by the regex.
- matcher = re.search(r'^ (\S*)', line)
- if matcher:
- modules.append(matcher.group(1))
- return modules
-
- @staticmethod
- def _disable_module(module):
- """Disables the specified module in Apache."""
- try:
- subprocess.check_call(['a2dismod', module])
- except subprocess.CalledProcessError as e:
- # Note: catch error here to allow the attempt of disabling
- # multiple modules in one go rather than failing after the
- # first module fails.
- log('Error occurred disabling module %s. '
- 'Output is: %s' % (module, e.output), level=ERROR)
-
- @staticmethod
- def _restart_apache():
- """Restarts the apache process"""
- subprocess.check_output(['service', 'apache2', 'restart'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
deleted file mode 100644
index e94af03..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import # required for external apt import
-from apt import apt_pkg
-from six import string_types
-
-from charmhelpers.fetch import (
- apt_cache,
- apt_purge
-)
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
-)
-from charmhelpers.contrib.hardening.audits import BaseAudit
-
-
-class AptConfig(BaseAudit):
-
- def __init__(self, config, **kwargs):
- self.config = config
-
- def verify_config(self):
- apt_pkg.init()
- for cfg in self.config:
- value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
- if value and value != cfg['expected']:
- log("APT config '%s' has unexpected value '%s' "
- "(expected='%s')" %
- (cfg['key'], value, cfg['expected']), level=WARNING)
-
- def ensure_compliance(self):
- self.verify_config()
-
-
-class RestrictedPackages(BaseAudit):
- """Class used to audit restricted packages on the system."""
-
- def __init__(self, pkgs, **kwargs):
- super(RestrictedPackages, self).__init__(**kwargs)
- if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
- self.pkgs = [pkgs]
- else:
- self.pkgs = pkgs
-
- def ensure_compliance(self):
- cache = apt_cache()
-
- for p in self.pkgs:
- if p not in cache:
- continue
-
- pkg = cache[p]
- if not self.is_virtual_package(pkg):
- if not pkg.current_ver:
- log("Package '%s' is not installed." % pkg.name,
- level=DEBUG)
- continue
- else:
- log("Restricted package '%s' is installed" % pkg.name,
- level=WARNING)
- self.delete_package(cache, pkg)
- else:
- log("Checking restricted virtual package '%s' provides" %
- pkg.name, level=DEBUG)
- self.delete_package(cache, pkg)
-
- def delete_package(self, cache, pkg):
- """Deletes the package from the system.
-
- Deletes the package form the system, properly handling virtual
- packages.
-
- :param cache: the apt cache
- :param pkg: the package to remove
- """
- if self.is_virtual_package(pkg):
- log("Package '%s' appears to be virtual - purging provides" %
- pkg.name, level=DEBUG)
- for _p in pkg.provides_list:
- self.delete_package(cache, _p[2].parent_pkg)
- elif not pkg.current_ver:
- log("Package '%s' not installed" % pkg.name, level=DEBUG)
- return
- else:
- log("Purging package '%s'" % pkg.name, level=DEBUG)
- apt_purge(pkg.name)
-
- def is_virtual_package(self, pkg):
- return pkg.has_provides and not pkg.has_versions
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
deleted file mode 100644
index 0fb545a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import grp
-import os
-import pwd
-import re
-
-from subprocess import (
- CalledProcessError,
- check_output,
- check_call,
-)
-from traceback import format_exc
-from six import string_types
-from stat import (
- S_ISGID,
- S_ISUID
-)
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core import unitdata
-from charmhelpers.core.host import file_hash
-from charmhelpers.contrib.hardening.audits import BaseAudit
-from charmhelpers.contrib.hardening.templating import (
- get_template_path,
- render_and_write,
-)
-from charmhelpers.contrib.hardening import utils
-
-
-class BaseFileAudit(BaseAudit):
- """Base class for file audits.
-
- Provides api stubs for compliance check flow that must be used by any class
- that implemented this one.
- """
-
- def __init__(self, paths, always_comply=False, *args, **kwargs):
- """
- :param paths: string path of list of paths of files we want to apply
- compliance checks are criteria to.
- :param always_comply: if true compliance criteria is always applied
- else compliance is skipped for non-existent
- paths.
- """
- super(BaseFileAudit, self).__init__(*args, **kwargs)
- self.always_comply = always_comply
- if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
- self.paths = [paths]
- else:
- self.paths = paths
-
- def ensure_compliance(self):
- """Ensure that the all registered files comply to registered criteria.
- """
- for p in self.paths:
- if os.path.exists(p):
- if self.is_compliant(p):
- continue
-
- log('File %s is not in compliance.' % p, level=INFO)
- else:
- if not self.always_comply:
- log("Non-existent path '%s' - skipping compliance check"
- % (p), level=INFO)
- continue
-
- if self._take_action():
- log("Applying compliance criteria to '%s'" % (p), level=INFO)
- self.comply(p)
-
- def is_compliant(self, path):
- """Audits the path to see if it is compliance.
-
- :param path: the path to the file that should be checked.
- """
- raise NotImplementedError
-
- def comply(self, path):
- """Enforces the compliance of a path.
-
- :param path: the path to the file that should be enforced.
- """
- raise NotImplementedError
-
- @classmethod
- def _get_stat(cls, path):
- """Returns the Posix st_stat information for the specified file path.
-
- :param path: the path to get the st_stat information for.
- :returns: an st_stat object for the path or None if the path doesn't
- exist.
- """
- return os.stat(path)
-
-
-class FilePermissionAudit(BaseFileAudit):
- """Implements an audit for file permissions and ownership for a user.
-
- This class implements functionality that ensures that a specific user/group
- will own the file(s) specified and that the permissions specified are
- applied properly to the file.
- """
- def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
- self.user = user
- self.group = group
- self.mode = mode
- super(FilePermissionAudit, self).__init__(paths, user, group, mode,
- **kwargs)
-
- @property
- def user(self):
- return self._user
-
- @user.setter
- def user(self, name):
- try:
- user = pwd.getpwnam(name)
- except KeyError:
- log('Unknown user %s' % name, level=ERROR)
- user = None
- self._user = user
-
- @property
- def group(self):
- return self._group
-
- @group.setter
- def group(self, name):
- try:
- group = None
- if name:
- group = grp.getgrnam(name)
- else:
- group = grp.getgrgid(self.user.pw_gid)
- except KeyError:
- log('Unknown group %s' % name, level=ERROR)
- self._group = group
-
- def is_compliant(self, path):
- """Checks if the path is in compliance.
-
- Used to determine if the path specified meets the necessary
- requirements to be in compliance with the check itself.
-
- :param path: the file path to check
- :returns: True if the path is compliant, False otherwise.
- """
- stat = self._get_stat(path)
- user = self.user
- group = self.group
-
- compliant = True
- if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
- log('File %s is not owned by %s:%s.' % (path, user.pw_name,
- group.gr_name),
- level=INFO)
- compliant = False
-
- # POSIX refers to the st_mode bits as corresponding to both the
- # file type and file permission bits, where the least significant 12
- # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
- # file permission bits (8-0)
- perms = stat.st_mode & 0o7777
- if perms != self.mode:
- log('File %s has incorrect permissions, currently set to %s' %
- (path, oct(stat.st_mode & 0o7777)), level=INFO)
- compliant = False
-
- return compliant
-
- def comply(self, path):
- """Issues a chown and chmod to the file paths specified."""
- utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
- self.mode)
-
-
-class DirectoryPermissionAudit(FilePermissionAudit):
- """Performs a permission check for the specified directory path."""
-
- def __init__(self, paths, user, group=None, mode=0o600,
- recursive=True, **kwargs):
- super(DirectoryPermissionAudit, self).__init__(paths, user, group,
- mode, **kwargs)
- self.recursive = recursive
-
- def is_compliant(self, path):
- """Checks if the directory is compliant.
-
- Used to determine if the path specified and all of its children
- directories are in compliance with the check itself.
-
- :param path: the directory path to check
- :returns: True if the directory tree is compliant, otherwise False.
- """
- if not os.path.isdir(path):
- log('Path specified %s is not a directory.' % path, level=ERROR)
- raise ValueError("%s is not a directory." % path)
-
- if not self.recursive:
- return super(DirectoryPermissionAudit, self).is_compliant(path)
-
- compliant = True
- for root, dirs, _ in os.walk(path):
- if len(dirs) > 0:
- continue
-
- if not super(DirectoryPermissionAudit, self).is_compliant(root):
- compliant = False
- continue
-
- return compliant
-
- def comply(self, path):
- for root, dirs, _ in os.walk(path):
- if len(dirs) > 0:
- super(DirectoryPermissionAudit, self).comply(root)
-
-
-class ReadOnly(BaseFileAudit):
- """Audits that files and folders are read only."""
- def __init__(self, paths, *args, **kwargs):
- super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
-
- def is_compliant(self, path):
- try:
- output = check_output(['find', path, '-perm', '-go+w',
- '-type', 'f']).strip()
-
- # The find above will find any files which have permission sets
- # which allow too broad of write access. As such, the path is
- # compliant if there is no output.
- if output:
- return False
-
- return True
- except CalledProcessError as e:
- log('Error occurred checking finding writable files for %s. '
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
- return False
-
- def comply(self, path):
- try:
- check_output(['chmod', 'go-w', '-R', path])
- except CalledProcessError as e:
- log('Error occurred removing writeable permissions for %s. '
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
-
-
-class NoReadWriteForOther(BaseFileAudit):
- """Ensures that the files found under the base path are readable or
- writable by anyone other than the owner or the group.
- """
- def __init__(self, paths):
- super(NoReadWriteForOther, self).__init__(paths)
-
- def is_compliant(self, path):
- try:
- cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
- '-perm', '-o+w', '-type', 'f']
- output = check_output(cmd).strip()
-
- # The find above here will find any files which have read or
- # write permissions for other, meaning there is too broad of access
- # to read/write the file. As such, the path is compliant if there's
- # no output.
- if output:
- return False
-
- return True
- except CalledProcessError as e:
- log('Error occurred while finding files which are readable or '
- 'writable to the world in %s. '
- 'Command output is: %s.' % (path, e.output), level=ERROR)
-
- def comply(self, path):
- try:
- check_output(['chmod', '-R', 'o-rw', path])
- except CalledProcessError as e:
- log('Error occurred attempting to change modes of files under '
- 'path %s. Output of command is: %s' % (path, e.output))
-
-
-class NoSUIDSGIDAudit(BaseFileAudit):
- """Audits that specified files do not have SUID/SGID bits set."""
- def __init__(self, paths, *args, **kwargs):
- super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
-
- def is_compliant(self, path):
- stat = self._get_stat(path)
- if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
- return False
-
- return True
-
- def comply(self, path):
- try:
- log('Removing suid/sgid from %s.' % path, level=DEBUG)
- check_output(['chmod', '-s', path])
- except CalledProcessError as e:
- log('Error occurred removing suid/sgid from %s.'
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
-
-
-class TemplatedFile(BaseFileAudit):
- """The TemplatedFileAudit audits the contents of a templated file.
-
- This audit renders a file from a template, sets the appropriate file
- permissions, then generates a hashsum with which to check the content
- changed.
- """
- def __init__(self, path, context, template_dir, mode, user='root',
- group='root', service_actions=None, **kwargs):
- self.context = context
- self.user = user
- self.group = group
- self.mode = mode
- self.template_dir = template_dir
- self.service_actions = service_actions
- super(TemplatedFile, self).__init__(paths=path, always_comply=True,
- **kwargs)
-
- def is_compliant(self, path):
- """Determines if the templated file is compliant.
-
- A templated file is only compliant if it has not changed (as
- determined by its sha256 hashsum) AND its file permissions are set
- appropriately.
-
- :param path: the path to check compliance.
- """
- same_templates = self.templates_match(path)
- same_content = self.contents_match(path)
- same_permissions = self.permissions_match(path)
-
- if same_content and same_permissions and same_templates:
- return True
-
- return False
-
- def run_service_actions(self):
- """Run any actions on services requested."""
- if not self.service_actions:
- return
-
- for svc_action in self.service_actions:
- name = svc_action['service']
- actions = svc_action['actions']
- log("Running service '%s' actions '%s'" % (name, actions),
- level=DEBUG)
- for action in actions:
- cmd = ['service', name, action]
- try:
- check_call(cmd)
- except CalledProcessError as exc:
- log("Service name='%s' action='%s' failed - %s" %
- (name, action, exc), level=WARNING)
-
- def comply(self, path):
- """Ensures the contents and the permissions of the file.
-
- :param path: the path to correct
- """
- dirname = os.path.dirname(path)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- self.pre_write()
- render_and_write(self.template_dir, path, self.context())
- utils.ensure_permissions(path, self.user, self.group, self.mode)
- self.run_service_actions()
- self.save_checksum(path)
- self.post_write()
-
- def pre_write(self):
- """Invoked prior to writing the template."""
- pass
-
- def post_write(self):
- """Invoked after writing the template."""
- pass
-
- def templates_match(self, path):
- """Determines if the template files are the same.
-
- The template file equality is determined by the hashsum of the
- template files themselves. If there is no hashsum, then the content
- cannot be sure to be the same so treat it as if they changed.
- Otherwise, return whether or not the hashsums are the same.
-
- :param path: the path to check
- :returns: boolean
- """
- template_path = get_template_path(self.template_dir, path)
- key = 'hardening:template:%s' % template_path
- template_checksum = file_hash(template_path)
- kv = unitdata.kv()
- stored_tmplt_checksum = kv.get(key)
- if not stored_tmplt_checksum:
- kv.set(key, template_checksum)
- kv.flush()
- log('Saved template checksum for %s.' % template_path,
- level=DEBUG)
- # Since we don't have a template checksum, then assume it doesn't
- # match and return that the template is different.
- return False
- elif stored_tmplt_checksum != template_checksum:
- kv.set(key, template_checksum)
- kv.flush()
- log('Updated template checksum for %s.' % template_path,
- level=DEBUG)
- return False
-
- # Here the template hasn't changed based upon the calculated
- # checksum of the template and what was previously stored.
- return True
-
- def contents_match(self, path):
- """Determines if the file content is the same.
-
- This is determined by comparing hashsum of the file contents and
- the saved hashsum. If there is no hashsum, then the content cannot
- be sure to be the same so treat them as if they are not the same.
- Otherwise, return True if the hashsums are the same, False if they
- are not the same.
-
- :param path: the file to check.
- """
- checksum = file_hash(path)
-
- kv = unitdata.kv()
- stored_checksum = kv.get('hardening:%s' % path)
- if not stored_checksum:
- # If the checksum hasn't been generated, return False to ensure
- # the file is written and the checksum stored.
- log('Checksum for %s has not been calculated.' % path, level=DEBUG)
- return False
- elif stored_checksum != checksum:
- log('Checksum mismatch for %s.' % path, level=DEBUG)
- return False
-
- return True
-
- def permissions_match(self, path):
- """Determines if the file owner and permissions match.
-
- :param path: the path to check.
- """
- audit = FilePermissionAudit(path, self.user, self.group, self.mode)
- return audit.is_compliant(path)
-
- def save_checksum(self, path):
- """Calculates and saves the checksum for the path specified.
-
- :param path: the path of the file to save the checksum.
- """
- checksum = file_hash(path)
- kv = unitdata.kv()
- kv.set('hardening:%s' % path, checksum)
- kv.flush()
-
-
-class DeletedFile(BaseFileAudit):
- """Audit to ensure that a file is deleted."""
- def __init__(self, paths):
- super(DeletedFile, self).__init__(paths)
-
- def is_compliant(self, path):
- return not os.path.exists(path)
-
- def comply(self, path):
- os.remove(path)
-
-
-class FileContentAudit(BaseFileAudit):
- """Audit the contents of a file."""
- def __init__(self, paths, cases, **kwargs):
- # Cases we expect to pass
- self.pass_cases = cases.get('pass', [])
- # Cases we expect to fail
- self.fail_cases = cases.get('fail', [])
- super(FileContentAudit, self).__init__(paths, **kwargs)
-
- def is_compliant(self, path):
- """
- Given a set of content matching cases i.e. tuple(regex, bool) where
- bool value denotes whether or not regex is expected to match, check that
- all cases match as expected with the contents of the file. Cases can be
- expected to pass of fail.
-
- :param path: Path of file to check.
- :returns: Boolean value representing whether or not all cases are
- found to be compliant.
- """
- log("Auditing contents of file '%s'" % (path), level=DEBUG)
- with open(path, 'r') as fd:
- contents = fd.read()
-
- matches = 0
- for pattern in self.pass_cases:
- key = re.compile(pattern, flags=re.MULTILINE)
- results = re.search(key, contents)
- if results:
- matches += 1
- else:
- log("Pattern '%s' was expected to pass but instead it failed"
- % (pattern), level=WARNING)
-
- for pattern in self.fail_cases:
- key = re.compile(pattern, flags=re.MULTILINE)
- results = re.search(key, contents)
- if not results:
- matches += 1
- else:
- log("Pattern '%s' was expected to fail but instead it passed"
- % (pattern), level=WARNING)
-
- total = len(self.pass_cases) + len(self.fail_cases)
- log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
- return matches == total
-
- def comply(self, *args, **kwargs):
- """NOOP since we just issue warnings. This is to avoid the
- NotImplememtedError.
- """
- log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
deleted file mode 100644
index e5ada29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# NOTE: this file contains the default configuration for the 'apache' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'apache' as the root key followed by any of the following with new
-# values.
-
-common:
- apache_dir: '/etc/apache2'
-
-hardening:
- traceenable: 'off'
- allowed_http_methods: "GET POST"
- modules_to_disable: [ cgi, cgid ] \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
deleted file mode 100644
index 227589b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
+++ /dev/null
@@ -1,9 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-common:
- apache_dir:
- traceenable:
-
-hardening:
- allowed_http_methods:
- modules_to_disable:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
deleted file mode 100644
index 682d22b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# NOTE: this file contains the default configuration for the 'mysql' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'mysql' as the root key followed by any of the following with new
-# values.
-
-hardening:
- mysql-conf: /etc/mysql/my.cnf
- hardening-conf: /etc/mysql/conf.d/hardening.cnf
-
-security:
- # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
- chroot: None
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
- safe-user-create: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
- secure-auth: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
- skip-symbolic-links: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
- skip-show-database: True
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
- local-infile: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
- allow-suspicious-udfs: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
- automatic-sp-privileges: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
- secure-file-priv: /tmp
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
deleted file mode 100644
index 2edf325..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
+++ /dev/null
@@ -1,15 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-hardening:
- mysql-conf:
- hardening-conf:
-security:
- chroot:
- safe-user-create:
- secure-auth:
- skip-symbolic-links:
- skip-show-database:
- local-infile:
- allow-suspicious-udfs:
- automatic-sp-privileges:
- secure-file-priv:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
deleted file mode 100644
index ddd4286..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-# NOTE: this file contains the default configuration for the 'os' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'os' as the root key followed by any of the following with new
-# values.
-
-general:
- desktop_enable: False # (type:boolean)
-
-environment:
- extra_user_paths: []
- umask: 027
- root_path: /
-
-auth:
- pw_max_age: 60
- # discourage password cycling
- pw_min_age: 7
- retries: 5
- lockout_time: 600
- timeout: 60
- allow_homeless: False # (type:boolean)
- pam_passwdqc_enable: True # (type:boolean)
- pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
- root_ttys:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
- uid_min: 1000
- gid_min: 1000
- sys_uid_min: 100
- sys_uid_max: 999
- sys_gid_min: 100
- sys_gid_max: 999
- chfn_restrict:
-
-security:
- users_allow: []
- suid_sgid_enforce: True # (type:boolean)
- # user-defined blacklist and whitelist
- suid_sgid_blacklist: []
- suid_sgid_whitelist: []
- # if this is True, remove any suid/sgid bits from files that were not in the whitelist
- suid_sgid_dry_run_on_unknown: False # (type:boolean)
- suid_sgid_remove_from_unknown: False # (type:boolean)
- # remove packages with known issues
- packages_clean: True # (type:boolean)
- packages_list:
- xinetd
- inetd
- ypserv
- telnet-server
- rsh-server
- rsync
- kernel_enable_module_loading: True # (type:boolean)
- kernel_enable_core_dump: False # (type:boolean)
-
-sysctl:
- kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
- kernel_enable_sysrq: False # (type:boolean)
- forwarding: False # (type:boolean)
- ipv6_enable: False # (type:boolean)
- arp_restricted: True # (type:boolean)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
deleted file mode 100644
index 88b3966..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
+++ /dev/null
@@ -1,42 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-general:
- desktop_enable:
-environment:
- extra_user_paths:
- umask:
- root_path:
-auth:
- pw_max_age:
- pw_min_age:
- retries:
- lockout_time:
- timeout:
- allow_homeless:
- pam_passwdqc_enable:
- pam_passwdqc_options:
- root_ttys:
- uid_min:
- gid_min:
- sys_uid_min:
- sys_uid_max:
- sys_gid_min:
- sys_gid_max:
- chfn_restrict:
-security:
- users_allow:
- suid_sgid_enforce:
- suid_sgid_blacklist:
- suid_sgid_whitelist:
- suid_sgid_dry_run_on_unknown:
- suid_sgid_remove_from_unknown:
- packages_clean:
- packages_list:
- kernel_enable_module_loading:
- kernel_enable_core_dump:
-sysctl:
- kernel_secure_sysrq:
- kernel_enable_sysrq:
- forwarding:
- ipv6_enable:
- arp_restricted:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
deleted file mode 100644
index cd529bc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# NOTE: this file contains the default configuration for the 'ssh' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'ssh' as the root key followed by any of the following with new
-# values.
-
-common:
- service_name: 'ssh'
- network_ipv6_enable: False # (type:boolean)
- ports: [22]
- remote_hosts: []
-
-client:
- package: 'openssh-client'
- cbc_required: False # (type:boolean)
- weak_hmac: False # (type:boolean)
- weak_kex: False # (type:boolean)
- roaming: False
- password_authentication: 'no'
-
-server:
- host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
- '/etc/ssh/ssh_host_ecdsa_key']
- cbc_required: False # (type:boolean)
- weak_hmac: False # (type:boolean)
- weak_kex: False # (type:boolean)
- allow_root_with_key: False # (type:boolean)
- allow_tcp_forwarding: 'no'
- allow_agent_forwarding: 'no'
- allow_x11_forwarding: 'no'
- use_privilege_separation: 'sandbox'
- listen_to: ['0.0.0.0']
- use_pam: 'no'
- package: 'openssh-server'
- password_authentication: 'no'
- alive_interval: '600'
- alive_count: '3'
- sftp_enable: False # (type:boolean)
- sftp_group: 'sftponly'
- sftp_chroot: '/home/%u'
- deny_users: []
- allow_users: []
- deny_groups: []
- allow_groups: []
- print_motd: 'no'
- print_last_log: 'no'
- use_dns: 'no'
- max_auth_tries: 2
- max_sessions: 10
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
deleted file mode 100644
index d05e054..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
+++ /dev/null
@@ -1,42 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-common:
- service_name:
- network_ipv6_enable:
- ports:
- remote_hosts:
-client:
- package:
- cbc_required:
- weak_hmac:
- weak_kex:
- roaming:
- password_authentication:
-server:
- host_key_files:
- cbc_required:
- weak_hmac:
- weak_kex:
- allow_root_with_key:
- allow_tcp_forwarding:
- allow_agent_forwarding:
- allow_x11_forwarding:
- use_privilege_separation:
- listen_to:
- use_pam:
- package:
- password_authentication:
- alive_interval:
- alive_count:
- sftp_enable:
- sftp_group:
- sftp_chroot:
- deny_users:
- allow_users:
- deny_groups:
- allow_groups:
- print_motd:
- print_last_log:
- use_dns:
- max_auth_tries:
- max_sessions:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
deleted file mode 100644
index ac7568d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-from collections import OrderedDict
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- DEBUG,
- WARNING,
-)
-from charmhelpers.contrib.hardening.host.checks import run_os_checks
-from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
-from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
-from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
-
-
-def harden(overrides=None):
- """Hardening decorator.
-
- This is the main entry point for running the hardening stack. In order to
- run modules of the stack you must add this decorator to charm hook(s) and
- ensure that your charm config.yaml contains the 'harden' option set to
- one or more of the supported modules. Setting these will cause the
- corresponding hardening code to be run when the hook fires.
-
- This decorator can and should be applied to more than one hook or function
- such that hardening modules are called multiple times. This is because
- subsequent calls will perform auditing checks that will report any changes
- to resources hardened by the first run (and possibly perform compliance
- actions as a result of any detected infractions).
-
- :param overrides: Optional list of stack modules used to override those
- provided with 'harden' config.
- :returns: Returns value returned by decorated function once executed.
- """
- def _harden_inner1(f):
- log("Hardening function '%s'" % (f.__name__), level=DEBUG)
-
- def _harden_inner2(*args, **kwargs):
- RUN_CATALOG = OrderedDict([('os', run_os_checks),
- ('ssh', run_ssh_checks),
- ('mysql', run_mysql_checks),
- ('apache', run_apache_checks)])
-
- enabled = overrides or (config("harden") or "").split()
- if enabled:
- modules_to_run = []
- # modules will always be performed in the following order
- for module, func in six.iteritems(RUN_CATALOG):
- if module in enabled:
- enabled.remove(module)
- modules_to_run.append(func)
-
- if enabled:
- log("Unknown hardening modules '%s' - ignoring" %
- (', '.join(enabled)), level=WARNING)
-
- for hardener in modules_to_run:
- log("Executing hardening module '%s'" %
- (hardener.__name__), level=DEBUG)
- hardener()
- else:
- log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
-
- return f(*args, **kwargs)
- return _harden_inner2
-
- return _harden_inner1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
deleted file mode 100644
index c3bd598..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.host.checks import (
- apt,
- limits,
- login,
- minimize_access,
- pam,
- profile,
- securetty,
- suid_sgid,
- sysctl
-)
-
-
-def run_os_checks():
- log("Starting OS hardening checks.", level=DEBUG)
- checks = apt.get_audits()
- checks.extend(limits.get_audits())
- checks.extend(login.get_audits())
- checks.extend(minimize_access.get_audits())
- checks.extend(pam.get_audits())
- checks.extend(profile.get_audits())
- checks.extend(securetty.get_audits())
- checks.extend(suid_sgid.get_audits())
- checks.extend(sysctl.get_audits())
-
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("OS hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
deleted file mode 100644
index 2c221cd..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.utils import get_settings
-from charmhelpers.contrib.hardening.audits.apt import (
- AptConfig,
- RestrictedPackages,
-)
-
-
-def get_audits():
- """Get OS hardening apt audits.
-
- :returns: dictionary of audits
- """
- audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
- 'expected': 'false'}])]
-
- settings = get_settings('os')
- clean_packages = settings['security']['packages_clean']
- if clean_packages:
- security_packages = settings['security']['packages_list']
- if security_packages:
- audits.append(RestrictedPackages(security_packages))
-
- return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
deleted file mode 100644
index 8ce9dc2..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import (
- DirectoryPermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening security limits audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Ensure that the /etc/security/limits.d directory is only writable
- # by the root user, but others can execute and read.
- audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
- user='root', group='root',
- mode=0o755))
-
- # If core dumps are not enabled, then don't allow core dumps to be
- # created as they may contain sensitive information.
- if not settings['security']['kernel_enable_core_dump']:
- audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
- SecurityLimitsContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root', mode=0o0440))
- return audits
-
-
-class SecurityLimitsContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'disable_core_dump':
- not settings['security']['kernel_enable_core_dump']}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
deleted file mode 100644
index d32c4f6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from six import string_types
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening login.defs audits.
-
- :returns: dictionary of audits
- """
- audits = [TemplatedFile('/etc/login.defs', LoginContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root', mode=0o0444)]
- return audits
-
-
-class LoginContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
-
- # Octal numbers in yaml end up being turned into decimal,
- # so check if the umask is entered as a string (e.g. '027')
- # or as an octal umask as we know it (e.g. 002). If its not
- # a string assume it to be octal and turn it into an octal
- # string.
- umask = settings['environment']['umask']
- if not isinstance(umask, string_types):
- umask = '%s' % oct(umask)
-
- ctxt = {
- 'additional_user_paths':
- settings['environment']['extra_user_paths'],
- 'umask': umask,
- 'pwd_max_age': settings['auth']['pw_max_age'],
- 'pwd_min_age': settings['auth']['pw_min_age'],
- 'uid_min': settings['auth']['uid_min'],
- 'sys_uid_min': settings['auth']['sys_uid_min'],
- 'sys_uid_max': settings['auth']['sys_uid_max'],
- 'gid_min': settings['auth']['gid_min'],
- 'sys_gid_min': settings['auth']['sys_gid_min'],
- 'sys_gid_max': settings['auth']['sys_gid_max'],
- 'login_retries': settings['auth']['retries'],
- 'login_timeout': settings['auth']['timeout'],
- 'chfn_restrict': settings['auth']['chfn_restrict'],
- 'allow_login_without_home': settings['auth']['allow_homeless']
- }
-
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
deleted file mode 100644
index c471064..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- ReadOnly,
-)
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening access audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Remove write permissions from $PATH folders for all regular users.
- # This prevents changing system-wide commands from normal users.
- path_folders = {'/usr/local/sbin',
- '/usr/local/bin',
- '/usr/sbin',
- '/usr/bin',
- '/bin'}
- extra_user_paths = settings['environment']['extra_user_paths']
- path_folders.update(extra_user_paths)
- audits.append(ReadOnly(path_folders))
-
- # Only allow the root user to have access to the shadow file.
- audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
-
- if 'change_user' not in settings['security']['users_allow']:
- # su should only be accessible to user and group root, unless it is
- # expressly defined to allow users to change to root via the
- # security_users_allow config option.
- audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
-
- return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
deleted file mode 100644
index 383fe28..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- check_output,
- CalledProcessError,
-)
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-from charmhelpers.fetch import (
- apt_install,
- apt_purge,
- apt_update,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- TemplatedFile,
- DeletedFile,
-)
-from charmhelpers.contrib.hardening import utils
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-
-
-def get_audits():
- """Get OS hardening PAM authentication audits.
-
- :returns: dictionary of audits
- """
- audits = []
-
- settings = utils.get_settings('os')
-
- if settings['auth']['pam_passwdqc_enable']:
- audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
-
- if settings['auth']['retries']:
- audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
- else:
- audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
-
- return audits
-
-
-class PasswdqcPAMContext(object):
-
- def __call__(self):
- ctxt = {}
- settings = utils.get_settings('os')
-
- ctxt['auth_pam_passwdqc_options'] = \
- settings['auth']['pam_passwdqc_options']
-
- return ctxt
-
-
-class PasswdqcPAM(TemplatedFile):
- """The PAM Audit verifies the linux PAM settings."""
- def __init__(self, path):
- super(PasswdqcPAM, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=PasswdqcPAMContext(),
- user='root',
- group='root',
- mode=0o0640)
-
- def pre_write(self):
- # Always remove?
- for pkg in ['libpam-ccreds', 'libpam-cracklib']:
- log("Purging package '%s'" % pkg, level=DEBUG),
- apt_purge(pkg)
-
- apt_update(fatal=True)
- for pkg in ['libpam-passwdqc']:
- log("Installing package '%s'" % pkg, level=DEBUG),
- apt_install(pkg)
-
- def post_write(self):
- """Updates the PAM configuration after the file has been written"""
- try:
- check_output(['pam-auth-update', '--package'])
- except CalledProcessError as e:
- log('Error calling pam-auth-update: %s' % e, level=ERROR)
-
-
-class Tally2PAMContext(object):
-
- def __call__(self):
- ctxt = {}
- settings = utils.get_settings('os')
-
- ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
- ctxt['auth_retries'] = settings['auth']['retries']
-
- return ctxt
-
-
-class Tally2PAM(TemplatedFile):
- """The PAM Audit verifies the linux PAM settings."""
- def __init__(self, path):
- super(Tally2PAM, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=Tally2PAMContext(),
- user='root',
- group='root',
- mode=0o0640)
-
- def pre_write(self):
- # Always remove?
- apt_purge('libpam-ccreds')
- apt_update(fatal=True)
- apt_install('libpam-modules')
-
- def post_write(self):
- """Updates the PAM configuration after the file has been written"""
- try:
- check_output(['pam-auth-update', '--package'])
- except CalledProcessError as e:
- log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
deleted file mode 100644
index f744335..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening profile audits.
-
- :returns: dictionary of audits
- """
- audits = []
-
- settings = utils.get_settings('os')
-
- # If core dumps are not enabled, then don't allow core dumps to be
- # created as they may contain sensitive information.
- if not settings['security']['kernel_enable_core_dump']:
- audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
- ProfileContext(),
- template_dir=TEMPLATES_DIR,
- mode=0o0755, user='root', group='root'))
- return audits
-
-
-class ProfileContext(object):
-
- def __call__(self):
- ctxt = {}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
deleted file mode 100644
index e33c73c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening Secure TTY audits.
-
- :returns: dictionary of audits
- """
- audits = []
- audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
- template_dir=TEMPLATES_DIR,
- mode=0o0400, user='root', group='root'))
- return audits
-
-
-class SecureTTYContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'ttys': settings['auth']['root_ttys']}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
deleted file mode 100644
index 0534689..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
-from charmhelpers.contrib.hardening import utils
-
-
-BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
- '/usr/libexec/openssh/ssh-keysign',
- '/usr/lib/openssh/ssh-keysign',
- '/sbin/netreport',
- '/usr/sbin/usernetctl',
- '/usr/sbin/userisdnctl',
- '/usr/sbin/pppd',
- '/usr/bin/lockfile',
- '/usr/bin/mail-lock',
- '/usr/bin/mail-unlock',
- '/usr/bin/mail-touchlock',
- '/usr/bin/dotlockfile',
- '/usr/bin/arping',
- '/usr/sbin/uuidd',
- '/usr/bin/mtr',
- '/usr/lib/evolution/camel-lock-helper-1.2',
- '/usr/lib/pt_chown',
- '/usr/lib/eject/dmcrypt-get-device',
- '/usr/lib/mc/cons.saver']
-
-WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
- '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
- '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
- '/usr/bin/passwd', '/usr/bin/ssh-agent',
- '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
- '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
- '/bin/ping6', '/usr/bin/traceroute6.iputils',
- '/sbin/mount.nfs', '/sbin/umount.nfs',
- '/sbin/mount.nfs4', '/sbin/umount.nfs4',
- '/usr/bin/crontab',
- '/usr/bin/wall', '/usr/bin/write',
- '/usr/bin/screen',
- '/usr/bin/mlocate',
- '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
- '/bin/fusermount',
- '/usr/bin/pkexec',
- '/usr/bin/sudo', '/usr/bin/sudoedit',
- '/usr/sbin/postdrop', '/usr/sbin/postqueue',
- '/usr/sbin/suexec',
- '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
- '/usr/kerberos/bin/ksu',
- '/usr/sbin/ccreds_validate',
- '/usr/bin/Xorg',
- '/usr/bin/X',
- '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
- '/usr/lib/vte/gnome-pty-helper',
- '/usr/lib/libvte9/gnome-pty-helper',
- '/usr/lib/libvte-2.90-9/gnome-pty-helper']
-
-
-def get_audits():
- """Get OS hardening suid/sgid audits.
-
- :returns: dictionary of audits
- """
- checks = []
- settings = utils.get_settings('os')
- if not settings['security']['suid_sgid_enforce']:
- log("Skipping suid/sgid hardening", level=INFO)
- return checks
-
- # Build the blacklist and whitelist of files for suid/sgid checks.
- # There are a total of 4 lists:
- # 1. the system blacklist
- # 2. the system whitelist
- # 3. the user blacklist
- # 4. the user whitelist
- #
- # The blacklist is the set of paths which should NOT have the suid/sgid bit
- # set and the whitelist is the set of paths which MAY have the suid/sgid
- # bit setl. The user whitelist/blacklist effectively override the system
- # whitelist/blacklist.
- u_b = settings['security']['suid_sgid_blacklist']
- u_w = settings['security']['suid_sgid_whitelist']
-
- blacklist = set(BLACKLIST) - set(u_w + u_b)
- whitelist = set(WHITELIST) - set(u_b + u_w)
-
- checks.append(NoSUIDSGIDAudit(blacklist))
-
- dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
-
- if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
- # If the policy is a dry_run (e.g. complain only) or remove unknown
- # suid/sgid bits then find all of the paths which have the suid/sgid
- # bit set and then remove the whitelisted paths.
- root_path = settings['environment']['root_path']
- unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
- checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
-
- return checks
-
-
-def find_paths_with_suid_sgid(root_path):
- """Finds all paths/files which have an suid/sgid bit enabled.
-
- Starting with the root_path, this will recursively find all paths which
- have an suid or sgid bit set.
- """
- cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
- '-type', 'f', '!', '-path', '/proc/*', '-print']
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, _ = p.communicate()
- return set(out.split('\n'))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
deleted file mode 100644
index 4a76d74..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import platform
-import re
-import six
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
- WARNING,
-)
-from charmhelpers.contrib.hardening import utils
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-
-
-SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
-net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
-net.ipv4.conf.all.rp_filter=1
-net.ipv4.conf.default.rp_filter=1
-net.ipv4.icmp_echo_ignore_broadcasts=1
-net.ipv4.icmp_ignore_bogus_error_responses=1
-net.ipv4.icmp_ratelimit=100
-net.ipv4.icmp_ratemask=88089
-net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
-net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
-net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
-net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
-net.ipv4.tcp_rfc1337=1
-net.ipv4.tcp_syncookies=1
-net.ipv4.conf.all.shared_media=1
-net.ipv4.conf.default.shared_media=1
-net.ipv4.conf.all.accept_source_route=0
-net.ipv4.conf.default.accept_source_route=0
-net.ipv4.conf.all.accept_redirects=0
-net.ipv4.conf.default.accept_redirects=0
-net.ipv6.conf.all.accept_redirects=0
-net.ipv6.conf.default.accept_redirects=0
-net.ipv4.conf.all.secure_redirects=0
-net.ipv4.conf.default.secure_redirects=0
-net.ipv4.conf.all.send_redirects=0
-net.ipv4.conf.default.send_redirects=0
-net.ipv4.conf.all.log_martians=0
-net.ipv6.conf.default.router_solicitations=0
-net.ipv6.conf.default.accept_ra_rtr_pref=0
-net.ipv6.conf.default.accept_ra_pinfo=0
-net.ipv6.conf.default.accept_ra_defrtr=0
-net.ipv6.conf.default.autoconf=0
-net.ipv6.conf.default.dad_transmits=0
-net.ipv6.conf.default.max_addresses=1
-net.ipv6.conf.all.accept_ra=0
-net.ipv6.conf.default.accept_ra=0
-kernel.modules_disabled=%(kernel_modules_disabled)s
-kernel.sysrq=%(kernel_sysrq)s
-fs.suid_dumpable=%(fs_suid_dumpable)s
-kernel.randomize_va_space=2
-"""
-
-
-def get_audits():
- """Get OS hardening sysctl audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Apply the sysctl settings which are configured to be applied.
- audits.append(SysctlConf())
- # Make sure that only root has access to the sysctl.conf file, and
- # that it is read-only.
- audits.append(FilePermissionAudit('/etc/sysctl.conf',
- user='root',
- group='root', mode=0o0440))
- # If module loading is not enabled, then ensure that the modules
- # file has the appropriate permissions and rebuild the initramfs
- if not settings['security']['kernel_enable_module_loading']:
- audits.append(ModulesTemplate())
-
- return audits
-
-
-class ModulesContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- with open('/proc/cpuinfo', 'r') as fd:
- cpuinfo = fd.readlines()
-
- for line in cpuinfo:
- match = re.search(r"^vendor_id\s+:\s+(.+)", line)
- if match:
- vendor = match.group(1)
-
- if vendor == "GenuineIntel":
- vendor = "intel"
- elif vendor == "AuthenticAMD":
- vendor = "amd"
-
- ctxt = {'arch': platform.processor(),
- 'cpuVendor': vendor,
- 'desktop_enable': settings['general']['desktop_enable']}
-
- return ctxt
-
-
-class ModulesTemplate(object):
-
- def __init__(self):
- super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
- ModulesContext(),
- templates_dir=TEMPLATES_DIR,
- user='root', group='root',
- mode=0o0440)
-
- def post_write(self):
- subprocess.check_call(['update-initramfs', '-u'])
-
-
-class SysCtlHardeningContext(object):
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'sysctl': {}}
-
- log("Applying sysctl settings", level=INFO)
- extras = {'net_ipv4_ip_forward': 0,
- 'net_ipv6_conf_all_forwarding': 0,
- 'net_ipv6_conf_all_disable_ipv6': 1,
- 'net_ipv4_tcp_timestamps': 0,
- 'net_ipv4_conf_all_arp_ignore': 0,
- 'net_ipv4_conf_all_arp_announce': 0,
- 'kernel_sysrq': 0,
- 'fs_suid_dumpable': 0,
- 'kernel_modules_disabled': 1}
-
- if settings['sysctl']['ipv6_enable']:
- extras['net_ipv6_conf_all_disable_ipv6'] = 0
-
- if settings['sysctl']['forwarding']:
- extras['net_ipv4_ip_forward'] = 1
- extras['net_ipv6_conf_all_forwarding'] = 1
-
- if settings['sysctl']['arp_restricted']:
- extras['net_ipv4_conf_all_arp_ignore'] = 1
- extras['net_ipv4_conf_all_arp_announce'] = 2
-
- if settings['security']['kernel_enable_module_loading']:
- extras['kernel_modules_disabled'] = 0
-
- if settings['sysctl']['kernel_enable_sysrq']:
- sysrq_val = settings['sysctl']['kernel_secure_sysrq']
- extras['kernel_sysrq'] = sysrq_val
-
- if settings['security']['kernel_enable_core_dump']:
- extras['fs_suid_dumpable'] = 1
-
- settings.update(extras)
- for d in (SYSCTL_DEFAULTS % settings).split():
- d = d.strip().partition('=')
- key = d[0].strip()
- path = os.path.join('/proc/sys', key.replace('.', '/'))
- if not os.path.exists(path):
- log("Skipping '%s' since '%s' does not exist" % (key, path),
- level=WARNING)
- continue
-
- ctxt['sysctl'][key] = d[2] or None
-
- # Translate for python3
- return {'sysctl_settings':
- [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
-
-
-class SysctlConf(TemplatedFile):
- """An audit check for sysctl settings."""
- def __init__(self):
- self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
- super(SysctlConf, self).__init__(self.conffile,
- SysCtlHardeningContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root',
- mode=0o0440)
-
- def post_write(self):
- try:
- subprocess.check_call(['sysctl', '-p', self.conffile])
- except subprocess.CalledProcessError as e:
- # NOTE: on some systems if sysctl cannot apply all settings it
- # will return non-zero as well.
- log("sysctl command returned an error (maybe some "
- "keys could not be set) - %s" % (e),
- level=WARNING)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
deleted file mode 100644
index 0014191..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-{% if disable_core_dump -%}
-# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
-* hard core 0
-{% endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
deleted file mode 100644
index 101f1e1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-{% for key, value in sysctl_settings -%}
-{{ key }}={{ value }}
-{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
deleted file mode 100644
index db137d6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
+++ /dev/null
@@ -1,349 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-#
-# /etc/login.defs - Configuration control definitions for the login package.
-#
-# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
-# If unspecified, some arbitrary (and possibly incorrect) value will
-# be assumed. All other items are optional - if not specified then
-# the described action or option will be inhibited.
-#
-# Comment lines (lines beginning with "#") and blank lines are ignored.
-#
-# Modified for Linux. --marekm
-
-# REQUIRED for useradd/userdel/usermod
-# Directory where mailboxes reside, _or_ name of file, relative to the
-# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
-# MAIL_DIR takes precedence.
-#
-# Essentially:
-# - MAIL_DIR defines the location of users mail spool files
-# (for mbox use) by appending the username to MAIL_DIR as defined
-# below.
-# - MAIL_FILE defines the location of the users mail spool files as the
-# fully-qualified filename obtained by prepending the user home
-# directory before $MAIL_FILE
-#
-# NOTE: This is no more used for setting up users MAIL environment variable
-# which is, starting from shadow 4.0.12-1 in Debian, entirely the
-# job of the pam_mail PAM modules
-# See default PAM configuration files provided for
-# login, su, etc.
-#
-# This is a temporary situation: setting these variables will soon
-# move to /etc/default/useradd and the variables will then be
-# no more supported
-MAIL_DIR /var/mail
-#MAIL_FILE .mail
-
-#
-# Enable logging and display of /var/log/faillog login failure info.
-# This option conflicts with the pam_tally PAM module.
-#
-FAILLOG_ENAB yes
-
-#
-# Enable display of unknown usernames when login failures are recorded.
-#
-# WARNING: Unknown usernames may become world readable.
-# See #290803 and #298773 for details about how this could become a security
-# concern
-LOG_UNKFAIL_ENAB no
-
-#
-# Enable logging of successful logins
-#
-LOG_OK_LOGINS yes
-
-#
-# Enable "syslog" logging of su activity - in addition to sulog file logging.
-# SYSLOG_SG_ENAB does the same for newgrp and sg.
-#
-SYSLOG_SU_ENAB yes
-SYSLOG_SG_ENAB yes
-
-#
-# If defined, all su activity is logged to this file.
-#
-#SULOG_FILE /var/log/sulog
-
-#
-# If defined, file which maps tty line to TERM environment parameter.
-# Each line of the file is in a format something like "vt100 tty01".
-#
-#TTYTYPE_FILE /etc/ttytype
-
-#
-# If defined, login failures will be logged here in a utmp format
-# last, when invoked as lastb, will read /var/log/btmp, so...
-#
-FTMP_FILE /var/log/btmp
-
-#
-# If defined, the command name to display when running "su -". For
-# example, if this is defined as "su" then a "ps" will display the
-# command is "-su". If not defined, then "ps" would display the
-# name of the shell actually being run, e.g. something like "-sh".
-#
-SU_NAME su
-
-#
-# If defined, file which inhibits all the usual chatter during the login
-# sequence. If a full pathname, then hushed mode will be enabled if the
-# user's name or shell are found in the file. If not a full pathname, then
-# hushed mode will be enabled if the file exists in the user's home directory.
-#
-HUSHLOGIN_FILE .hushlogin
-#HUSHLOGIN_FILE /etc/hushlogins
-
-#
-# *REQUIRED* The default PATH settings, for superuser and normal users.
-#
-# (they are minimal, add the rest in the shell startup files)
-ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
-
-#
-# Terminal permissions
-#
-# TTYGROUP Login tty will be assigned this group ownership.
-# TTYPERM Login tty will be set to this permission.
-#
-# If you have a "write" program which is "setgid" to a special group
-# which owns the terminals, define TTYGROUP to the group number and
-# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
-# TTYPERM to either 622 or 600.
-#
-# In Debian /usr/bin/bsd-write or similar programs are setgid tty
-# However, the default and recommended value for TTYPERM is still 0600
-# to not allow anyone to write to anyone else console or terminal
-
-# Users can still allow other people to write them by issuing
-# the "mesg y" command.
-
-TTYGROUP tty
-TTYPERM 0600
-
-#
-# Login configuration initializations:
-#
-# ERASECHAR Terminal ERASE character ('\010' = backspace).
-# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
-# UMASK Default "umask" value.
-#
-# The ERASECHAR and KILLCHAR are used only on System V machines.
-#
-# UMASK is the default umask value for pam_umask and is used by
-# useradd and newusers to set the mode of the new home directories.
-# 022 is the "historical" value in Debian for UMASK
-# 027, or even 077, could be considered better for privacy
-# There is no One True Answer here : each sysadmin must make up his/her
-# mind.
-#
-# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
-# for private user groups, i. e. the uid is the same as gid, and username is
-# the same as the primary group name: for these, the user permissions will be
-# used as group permissions, e. g. 022 will become 002.
-#
-# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
-#
-ERASECHAR 0177
-KILLCHAR 025
-UMASK {{ umask }}
-
-# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
-# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user.
-USERGROUPS_ENAB yes
-
-#
-# Password aging controls:
-#
-# PASS_MAX_DAYS Maximum number of days a password may be used.
-# PASS_MIN_DAYS Minimum number of days allowed between password changes.
-# PASS_WARN_AGE Number of days warning given before a password expires.
-#
-PASS_MAX_DAYS {{ pwd_max_age }}
-PASS_MIN_DAYS {{ pwd_min_age }}
-PASS_WARN_AGE 7
-
-#
-# Min/max values for automatic uid selection in useradd
-#
-UID_MIN {{ uid_min }}
-UID_MAX 60000
-# System accounts
-SYS_UID_MIN {{ sys_uid_min }}
-SYS_UID_MAX {{ sys_uid_max }}
-
-# Min/max values for automatic gid selection in groupadd
-GID_MIN {{ gid_min }}
-GID_MAX 60000
-# System accounts
-SYS_GID_MIN {{ sys_gid_min }}
-SYS_GID_MAX {{ sys_gid_max }}
-
-#
-# Max number of login retries if password is bad. This will most likely be
-# overriden by PAM, since the default pam_unix module has it's own built
-# in of 3 retries. However, this is a safe fallback in case you are using
-# an authentication module that does not enforce PAM_MAXTRIES.
-#
-LOGIN_RETRIES {{ login_retries }}
-
-#
-# Max time in seconds for login
-#
-LOGIN_TIMEOUT {{ login_timeout }}
-
-#
-# Which fields may be changed by regular users using chfn - use
-# any combination of letters "frwh" (full name, room number, work
-# phone, home phone). If not defined, no changes are allowed.
-# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
-#
-{% if chfn_restrict %}
-CHFN_RESTRICT {{ chfn_restrict }}
-{% endif %}
-
-#
-# Should login be allowed if we can't cd to the home directory?
-# Default in no.
-#
-DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
-
-#
-# If defined, this command is run when removing a user.
-# It should remove any at/cron/print jobs etc. owned by
-# the user to be removed (passed as the first argument).
-#
-#USERDEL_CMD /usr/sbin/userdel_local
-
-#
-# Enable setting of the umask group bits to be the same as owner bits
-# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
-# the same as gid, and username is the same as the primary group name.
-#
-# If set to yes, userdel will remove the user´s group if it contains no
-# more members, and useradd will create by default a group with the name
-# of the user.
-#
-USERGROUPS_ENAB yes
-
-#
-# Instead of the real user shell, the program specified by this parameter
-# will be launched, although its visible name (argv[0]) will be the shell's.
-# The program may do whatever it wants (logging, additional authentification,
-# banner, ...) before running the actual shell.
-#
-# FAKE_SHELL /bin/fakeshell
-
-#
-# If defined, either full pathname of a file containing device names or
-# a ":" delimited list of device names. Root logins will be allowed only
-# upon these devices.
-#
-# This variable is used by login and su.
-#
-#CONSOLE /etc/consoles
-#CONSOLE console:tty01:tty02:tty03:tty04
-
-#
-# List of groups to add to the user's supplementary group set
-# when logging in on the console (as determined by the CONSOLE
-# setting). Default is none.
-#
-# Use with caution - it is possible for users to gain permanent
-# access to these groups, even when not logged in on the console.
-# How to do it is left as an exercise for the reader...
-#
-# This variable is used by login and su.
-#
-#CONSOLE_GROUPS floppy:audio:cdrom
-
-#
-# If set to "yes", new passwords will be encrypted using the MD5-based
-# algorithm compatible with the one used by recent releases of FreeBSD.
-# It supports passwords of unlimited length and longer salt strings.
-# Set to "no" if you need to copy encrypted passwords to other systems
-# which don't understand the new algorithm. Default is "no".
-#
-# This variable is deprecated. You should use ENCRYPT_METHOD.
-#
-MD5_CRYPT_ENAB no
-
-#
-# If set to MD5 , MD5-based algorithm will be used for encrypting password
-# If set to SHA256, SHA256-based algorithm will be used for encrypting password
-# If set to SHA512, SHA512-based algorithm will be used for encrypting password
-# If set to DES, DES-based algorithm will be used for encrypting password (default)
-# Overrides the MD5_CRYPT_ENAB option
-#
-# Note: It is recommended to use a value consistent with
-# the PAM modules configuration.
-#
-ENCRYPT_METHOD SHA512
-
-#
-# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
-#
-# Define the number of SHA rounds.
-# With a lot of rounds, it is more difficult to brute forcing the password.
-# But note also that it more CPU resources will be needed to authenticate
-# users.
-#
-# If not specified, the libc will choose the default number of rounds (5000).
-# The values must be inside the 1000-999999999 range.
-# If only one of the MIN or MAX values is set, then this value will be used.
-# If MIN > MAX, the highest value will be used.
-#
-# SHA_CRYPT_MIN_ROUNDS 5000
-# SHA_CRYPT_MAX_ROUNDS 5000
-
-################# OBSOLETED BY PAM ##############
-# #
-# These options are now handled by PAM. Please #
-# edit the appropriate file in /etc/pam.d/ to #
-# enable the equivelants of them.
-#
-###############
-
-#MOTD_FILE
-#DIALUPS_CHECK_ENAB
-#LASTLOG_ENAB
-#MAIL_CHECK_ENAB
-#OBSCURE_CHECKS_ENAB
-#PORTTIME_CHECKS_ENAB
-#SU_WHEEL_ONLY
-#CRACKLIB_DICTPATH
-#PASS_CHANGE_TRIES
-#PASS_ALWAYS_WARN
-#ENVIRON_FILE
-#NOLOGINS_FILE
-#ISSUE_FILE
-#PASS_MIN_LEN
-#PASS_MAX_LEN
-#ULIMIT
-#ENV_HZ
-#CHFN_AUTH
-#CHSH_AUTH
-#FAIL_DELAY
-
-################# OBSOLETED #######################
-# #
-# These options are no more handled by shadow. #
-# #
-# Shadow utilities will display a warning if they #
-# still appear. #
-# #
-###################################################
-
-# CLOSE_SESSIONS
-# LOGIN_STRING
-# NO_PASSWORD_CONSOLE
-# QMAIL_DIR
-
-
-
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
deleted file mode 100644
index ef0354e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
+++ /dev/null
@@ -1,117 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-# Arch
-# ----
-#
-# Modules for certains builds, contains support modules and some CPU-specific optimizations.
-
-{% if arch == "x86_64" -%}
-# Optimize for x86_64 cryptographic features
-twofish-x86_64-3way
-twofish-x86_64
-aes-x86_64
-salsa20-x86_64
-blowfish-x86_64
-{% endif -%}
-
-{% if cpuVendor == "intel" -%}
-# Intel-specific optimizations
-ghash-clmulni-intel
-aesni-intel
-kvm-intel
-{% endif -%}
-
-{% if cpuVendor == "amd" -%}
-# AMD-specific optimizations
-kvm-amd
-{% endif -%}
-
-kvm
-
-
-# Crypto
-# ------
-
-# Some core modules which comprise strong cryptography.
-blowfish_common
-blowfish_generic
-ctr
-cts
-lrw
-lzo
-rmd160
-rmd256
-rmd320
-serpent
-sha512_generic
-twofish_common
-twofish_generic
-xts
-zlib
-
-
-# Drivers
-# -------
-
-# Basics
-lp
-rtc
-loop
-
-# Filesystems
-ext2
-btrfs
-
-{% if desktop_enable -%}
-# Desktop
-psmouse
-snd
-snd_ac97_codec
-snd_intel8x0
-snd_page_alloc
-snd_pcm
-snd_timer
-soundcore
-usbhid
-{% endif -%}
-
-# Lib
-# ---
-xz
-
-
-# Net
-# ---
-
-# All packets needed for netfilter rules (ie iptables, ebtables).
-ip_tables
-x_tables
-iptable_filter
-iptable_nat
-
-# Targets
-ipt_LOG
-ipt_REJECT
-
-# Modules
-xt_connlimit
-xt_tcpudp
-xt_recent
-xt_limit
-xt_conntrack
-nf_conntrack
-nf_conntrack_ipv4
-nf_defrag_ipv4
-xt_state
-nf_nat
-
-# Addons
-xt_pknock \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
deleted file mode 100644
index f98d14e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-Name: passwdqc password strength enforcement
-Default: yes
-Priority: 1024
-Conflicts: cracklib
-Password-Type: Primary
-Password:
- requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
deleted file mode 100644
index fd2de79..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# Disable core dumps via soft limits for all users. Compliance to this setting
-# is voluntary and can be modified by users up to a hard limit. This setting is
-# a sane default.
-ulimit -S -c 0 > /dev/null 2>&1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
deleted file mode 100644
index 15b18d4..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
+++ /dev/null
@@ -1,11 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# A list of TTYs, from which root can log in
-# see `man securetty` for reference
-{% if ttys -%}
-{% for tty in ttys -%}
-{{ tty }}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2 b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
deleted file mode 100644
index d962029..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
+++ /dev/null
@@ -1,14 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-Name: tally2 lockout after failed attempts enforcement
-Default: yes
-Priority: 1024
-Conflicts: cracklib
-Auth-Type: Primary
-Auth-Initial:
- required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }}
-Account-Type: Primary
-Account-Initial:
- required pam_tally2.so
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
deleted file mode 100644
index d4f0ec1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.mysql.checks import config
-
-
-def run_mysql_checks():
- log("Starting MySQL hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("MySQL hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
deleted file mode 100644
index 3af8b89..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- DirectoryPermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get MySQL hardening config audits.
-
- :returns: dictionary of audits
- """
- if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
- log("MySQL does not appear to be installed on this node - "
- "skipping mysql hardening", level=WARNING)
- return []
-
- settings = utils.get_settings('mysql')
- hardening_settings = settings['hardening']
- my_cnf = hardening_settings['mysql-conf']
-
- audits = [
- FilePermissionAudit(paths=[my_cnf], user='root',
- group='root', mode=0o0600),
-
- TemplatedFile(hardening_settings['hardening-conf'],
- MySQLConfContext(),
- TEMPLATES_DIR,
- mode=0o0750,
- user='mysql',
- group='root',
- service_actions=[{'service': 'mysql',
- 'actions': ['restart']}]),
-
- # MySQL and Percona charms do not allow configuration of the
- # data directory, so use the default.
- DirectoryPermissionAudit('/var/lib/mysql',
- user='mysql',
- group='mysql',
- recursive=False,
- mode=0o755),
-
- DirectoryPermissionAudit('/etc/mysql',
- user='root',
- group='root',
- recursive=False,
- mode=0o700),
- ]
-
- return audits
-
-
-class MySQLConfContext(object):
- """Defines the set of key/value pairs to set in a mysql config file.
-
- This context, when called, will return a dictionary containing the
- key/value pairs of setting to specify in the
- /etc/mysql/conf.d/hardening.cnf file.
- """
- def __call__(self):
- settings = utils.get_settings('mysql')
- # Translate for python3
- return {'mysql_settings':
- [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
deleted file mode 100644
index 8242586..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
+++ /dev/null
@@ -1,12 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-[mysqld]
-{% for setting, value in mysql_settings -%}
-{% if value == 'True' -%}
-{{ setting }}
-{% elif value != 'None' and value != None -%}
-{{ setting }} = {{ value }}
-{% endif -%}
-{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
deleted file mode 100644
index b85150d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.ssh.checks import config
-
-
-def run_ssh_checks():
- log("Starting SSH hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("SSH hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
deleted file mode 100644
index 3fb6ae8..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.fetch import (
- apt_install,
- apt_update,
-)
-from charmhelpers.core.host import lsb_release
-from charmhelpers.contrib.hardening.audits.file import (
- TemplatedFile,
- FileContentAudit,
-)
-from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get SSH hardening config audits.
-
- :returns: dictionary of audits
- """
- audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
- SSHDConfigFileContentAudit()]
- return audits
-
-
-class SSHConfigContext(object):
-
- type = 'client'
-
- def get_macs(self, allow_weak_mac):
- if allow_weak_mac:
- weak_macs = 'weak'
- else:
- weak_macs = 'default'
-
- default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
- macs = {'default': default,
- 'weak': default + ',hmac-sha1'}
-
- default = ('hmac-sha2-512-etm@openssh.com,'
- 'hmac-sha2-256-etm@openssh.com,'
- 'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
- 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
- macs_66 = {'default': default,
- 'weak': default + ',hmac-sha1'}
-
- # Use newer ciphers on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
- macs = macs_66
-
- return macs[weak_macs]
-
- def get_kexs(self, allow_weak_kex):
- if allow_weak_kex:
- weak_kex = 'weak'
- else:
- weak_kex = 'default'
-
- default = 'diffie-hellman-group-exchange-sha256'
- weak = (default + ',diffie-hellman-group14-sha1,'
- 'diffie-hellman-group-exchange-sha1,'
- 'diffie-hellman-group1-sha1')
- kex = {'default': default,
- 'weak': weak}
-
- default = ('curve25519-sha256@libssh.org,'
- 'diffie-hellman-group-exchange-sha256')
- weak = (default + ',diffie-hellman-group14-sha1,'
- 'diffie-hellman-group-exchange-sha1,'
- 'diffie-hellman-group1-sha1')
- kex_66 = {'default': default,
- 'weak': weak}
-
- # Use newer kex on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log('Detected Ubuntu 14.04 or newer, using new key exchange '
- 'algorithms', level=DEBUG)
- kex = kex_66
-
- return kex[weak_kex]
-
- def get_ciphers(self, cbc_required):
- if cbc_required:
- weak_ciphers = 'weak'
- else:
- weak_ciphers = 'default'
-
- default = 'aes256-ctr,aes192-ctr,aes128-ctr'
- cipher = {'default': default,
- 'weak': default + 'aes256-cbc,aes192-cbc,aes128-cbc'}
-
- default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
- 'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
- ciphers_66 = {'default': default,
- 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
-
- # Use newer ciphers on ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log('Detected Ubuntu 14.04 or newer, using new ciphers',
- level=DEBUG)
- cipher = ciphers_66
-
- return cipher[weak_ciphers]
-
- def __call__(self):
- settings = utils.get_settings('ssh')
- if settings['common']['network_ipv6_enable']:
- addr_family = 'any'
- else:
- addr_family = 'inet'
-
- ctxt = {
- 'addr_family': addr_family,
- 'remote_hosts': settings['common']['remote_hosts'],
- 'password_auth_allowed':
- settings['client']['password_authentication'],
- 'ports': settings['common']['ports'],
- 'ciphers': self.get_ciphers(settings['client']['cbc_required']),
- 'macs': self.get_macs(settings['client']['weak_hmac']),
- 'kexs': self.get_kexs(settings['client']['weak_kex']),
- 'roaming': settings['client']['roaming'],
- }
- return ctxt
-
-
-class SSHConfig(TemplatedFile):
- def __init__(self):
- path = '/etc/ssh/ssh_config'
- super(SSHConfig, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=SSHConfigContext(),
- user='root',
- group='root',
- mode=0o0644)
-
- def pre_write(self):
- settings = utils.get_settings('ssh')
- apt_update(fatal=True)
- apt_install(settings['client']['package'])
- if not os.path.exists('/etc/ssh'):
- os.makedir('/etc/ssh')
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
- def post_write(self):
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
-
-class SSHDConfigContext(SSHConfigContext):
-
- type = 'server'
-
- def __call__(self):
- settings = utils.get_settings('ssh')
- if settings['common']['network_ipv6_enable']:
- addr_family = 'any'
- else:
- addr_family = 'inet'
-
- ctxt = {
- 'ssh_ip': settings['server']['listen_to'],
- 'password_auth_allowed':
- settings['server']['password_authentication'],
- 'ports': settings['common']['ports'],
- 'addr_family': addr_family,
- 'ciphers': self.get_ciphers(settings['server']['cbc_required']),
- 'macs': self.get_macs(settings['server']['weak_hmac']),
- 'kexs': self.get_kexs(settings['server']['weak_kex']),
- 'host_key_files': settings['server']['host_key_files'],
- 'allow_root_with_key': settings['server']['allow_root_with_key'],
- 'password_authentication':
- settings['server']['password_authentication'],
- 'use_priv_sep': settings['server']['use_privilege_separation'],
- 'use_pam': settings['server']['use_pam'],
- 'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
- 'print_motd': settings['server']['print_motd'],
- 'print_last_log': settings['server']['print_last_log'],
- 'client_alive_interval':
- settings['server']['alive_interval'],
- 'client_alive_count': settings['server']['alive_count'],
- 'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
- 'allow_agent_forwarding':
- settings['server']['allow_agent_forwarding'],
- 'deny_users': settings['server']['deny_users'],
- 'allow_users': settings['server']['allow_users'],
- 'deny_groups': settings['server']['deny_groups'],
- 'allow_groups': settings['server']['allow_groups'],
- 'use_dns': settings['server']['use_dns'],
- 'sftp_enable': settings['server']['sftp_enable'],
- 'sftp_group': settings['server']['sftp_group'],
- 'sftp_chroot': settings['server']['sftp_chroot'],
- 'max_auth_tries': settings['server']['max_auth_tries'],
- 'max_sessions': settings['server']['max_sessions'],
- }
- return ctxt
-
-
-class SSHDConfig(TemplatedFile):
- def __init__(self):
- path = '/etc/ssh/sshd_config'
- super(SSHDConfig, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=SSHDConfigContext(),
- user='root',
- group='root',
- mode=0o0600,
- service_actions=[{'service': 'ssh',
- 'actions':
- ['restart']}])
-
- def pre_write(self):
- settings = utils.get_settings('ssh')
- apt_update(fatal=True)
- apt_install(settings['server']['package'])
- if not os.path.exists('/etc/ssh'):
- os.makedir('/etc/ssh')
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
- def post_write(self):
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
-
-class SSHConfigFileContentAudit(FileContentAudit):
- def __init__(self):
- self.path = '/etc/ssh/ssh_config'
- super(SSHConfigFileContentAudit, self).__init__(self.path, {})
-
- def is_compliant(self, *args, **kwargs):
- self.pass_cases = []
- self.fail_cases = []
- settings = utils.get_settings('ssh')
-
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- if not settings['client']['weak_hmac']:
- self.fail_cases.append(r'^MACs.+,hmac-sha1$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['client']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
-
- if settings['client']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-
- if settings['client']['roaming']:
- self.pass_cases.append(r'^UseRoaming yes$')
- else:
- self.fail_cases.append(r'^UseRoaming yes$')
-
- return super(SSHConfigFileContentAudit, self).is_compliant(*args,
- **kwargs)
-
-
-class SSHDConfigFileContentAudit(FileContentAudit):
- def __init__(self):
- self.path = '/etc/ssh/sshd_config'
- super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
-
- def is_compliant(self, *args, **kwargs):
- self.pass_cases = []
- self.fail_cases = []
- settings = utils.get_settings('ssh')
-
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-
- if settings['server']['sftp_enable']:
- self.pass_cases.append(r'^Subsystem\ssftp')
- else:
- self.fail_cases.append(r'^Subsystem\ssftp')
-
- return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
- **kwargs)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
deleted file mode 100644
index 9742d8e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
+++ /dev/null
@@ -1,70 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# This is the ssh client system-wide configuration file. See
-# ssh_config(5) for more information. This file provides defaults for
-# users, and the values can be changed in per-user configuration files
-# or on the command line.
-
-# Configuration data is parsed as follows:
-# 1. command line options
-# 2. user-specific file
-# 3. system-wide file
-# Any configuration value is only changed the first time it is set.
-# Thus, host-specific definitions should be at the beginning of the
-# configuration file, and defaults at the end.
-
-# Site-wide defaults for some commonly used options. For a comprehensive
-# list of available options, their meanings and defaults, please see the
-# ssh_config(5) man page.
-
-# Restrict the following configuration to be limited to this Host.
-{% if remote_hosts -%}
-Host {{ ' '.join(remote_hosts) }}
-{% endif %}
-ForwardAgent no
-ForwardX11 no
-ForwardX11Trusted yes
-RhostsRSAAuthentication no
-RSAAuthentication yes
-PasswordAuthentication {{ password_auth_allowed }}
-HostbasedAuthentication no
-GSSAPIAuthentication no
-GSSAPIDelegateCredentials no
-GSSAPIKeyExchange no
-GSSAPITrustDNS no
-BatchMode no
-CheckHostIP yes
-AddressFamily {{ addr_family }}
-ConnectTimeout 0
-StrictHostKeyChecking ask
-IdentityFile ~/.ssh/identity
-IdentityFile ~/.ssh/id_rsa
-IdentityFile ~/.ssh/id_dsa
-# The port at the destination should be defined
-{% for port in ports -%}
-Port {{ port }}
-{% endfor %}
-Protocol 2
-Cipher 3des
-{% if ciphers -%}
-Ciphers {{ ciphers }}
-{%- endif %}
-{% if macs -%}
-MACs {{ macs }}
-{%- endif %}
-{% if kexs -%}
-KexAlgorithms {{ kexs }}
-{%- endif %}
-EscapeChar ~
-Tunnel no
-TunnelDevice any:any
-PermitLocalCommand no
-VisualHostKey no
-RekeyLimit 1G 1h
-SendEnv LANG LC_*
-HashKnownHosts yes
-{% if roaming -%}
-UseRoaming {{ roaming }}
-{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
deleted file mode 100644
index 5f87298..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
+++ /dev/null
@@ -1,159 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# Package generated configuration file
-# See the sshd_config(5) manpage for details
-
-# What ports, IPs and protocols we listen for
-{% for port in ports -%}
-Port {{ port }}
-{% endfor -%}
-AddressFamily {{ addr_family }}
-# Use these options to restrict which interfaces/protocols sshd will bind to
-{% if ssh_ip -%}
-{% for ip in ssh_ip -%}
-ListenAddress {{ ip }}
-{% endfor %}
-{%- else -%}
-ListenAddress ::
-ListenAddress 0.0.0.0
-{% endif -%}
-Protocol 2
-{% if ciphers -%}
-Ciphers {{ ciphers }}
-{% endif -%}
-{% if macs -%}
-MACs {{ macs }}
-{% endif -%}
-{% if kexs -%}
-KexAlgorithms {{ kexs }}
-{% endif -%}
-# HostKeys for protocol version 2
-{% for keyfile in host_key_files -%}
-HostKey {{ keyfile }}
-{% endfor -%}
-
-# Privilege Separation is turned on for security
-{% if use_priv_sep -%}
-UsePrivilegeSeparation {{ use_priv_sep }}
-{% endif -%}
-
-# Lifetime and size of ephemeral version 1 server key
-KeyRegenerationInterval 3600
-ServerKeyBits 1024
-
-# Logging
-SyslogFacility AUTH
-LogLevel VERBOSE
-
-# Authentication:
-LoginGraceTime 30s
-{% if allow_root_with_key -%}
-PermitRootLogin without-password
-{% else -%}
-PermitRootLogin no
-{% endif %}
-PermitTunnel no
-PermitUserEnvironment no
-StrictModes yes
-
-RSAAuthentication yes
-PubkeyAuthentication yes
-AuthorizedKeysFile %h/.ssh/authorized_keys
-
-# Don't read the user's ~/.rhosts and ~/.shosts files
-IgnoreRhosts yes
-# For this to work you will also need host keys in /etc/ssh_known_hosts
-RhostsRSAAuthentication no
-# similar for protocol version 2
-HostbasedAuthentication no
-# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
-IgnoreUserKnownHosts yes
-
-# To enable empty passwords, change to yes (NOT RECOMMENDED)
-PermitEmptyPasswords no
-
-# Change to yes to enable challenge-response passwords (beware issues with
-# some PAM modules and threads)
-ChallengeResponseAuthentication no
-
-# Change to no to disable tunnelled clear text passwords
-PasswordAuthentication {{ password_authentication }}
-
-# Kerberos options
-KerberosAuthentication no
-KerberosGetAFSToken no
-KerberosOrLocalPasswd no
-KerberosTicketCleanup yes
-
-# GSSAPI options
-GSSAPIAuthentication no
-GSSAPICleanupCredentials yes
-
-X11Forwarding {{ allow_x11_forwarding }}
-X11DisplayOffset 10
-X11UseLocalhost yes
-GatewayPorts no
-PrintMotd {{ print_motd }}
-PrintLastLog {{ print_last_log }}
-TCPKeepAlive no
-UseLogin no
-
-ClientAliveInterval {{ client_alive_interval }}
-ClientAliveCountMax {{ client_alive_count }}
-AllowTcpForwarding {{ allow_tcp_forwarding }}
-AllowAgentForwarding {{ allow_agent_forwarding }}
-
-MaxStartups 10:30:100
-#Banner /etc/issue.net
-
-# Allow client to pass locale environment variables
-AcceptEnv LANG LC_*
-
-# Set this to 'yes' to enable PAM authentication, account processing,
-# and session processing. If this is enabled, PAM authentication will
-# be allowed through the ChallengeResponseAuthentication and
-# PasswordAuthentication. Depending on your PAM configuration,
-# PAM authentication via ChallengeResponseAuthentication may bypass
-# the setting of "PermitRootLogin without-password".
-# If you just want the PAM account and session checks to run without
-# PAM authentication, then enable this but set PasswordAuthentication
-# and ChallengeResponseAuthentication to 'no'.
-UsePAM {{ use_pam }}
-
-{% if deny_users -%}
-DenyUsers {{ deny_users }}
-{% endif -%}
-{% if allow_users -%}
-AllowUsers {{ allow_users }}
-{% endif -%}
-{% if deny_groups -%}
-DenyGroups {{ deny_groups }}
-{% endif -%}
-{% if allow_groups -%}
-AllowGroups allow_groups
-{% endif -%}
-UseDNS {{ use_dns }}
-MaxAuthTries {{ max_auth_tries }}
-MaxSessions {{ max_sessions }}
-
-{% if sftp_enable -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-Subsystem sftp internal-sftp -l VERBOSE
-
-## These lines must appear at the *end* of sshd_config
-Match Group {{ sftp_group }}
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory {{ sftp_chroot }}
-{% else -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-## These lines must appear at the *end* of sshd_config
-Match Group sftponly
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory /sftpchroot/home/%u
-{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
deleted file mode 100644
index d2ab7dc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
-)
-
-try:
- from jinja2 import FileSystemLoader, Environment
-except ImportError:
- from charmhelpers.fetch import apt_install
- from charmhelpers.fetch import apt_update
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment
-
-
-# NOTE: function separated from main rendering code to facilitate easier
-# mocking in unit tests.
-def write(path, data):
- with open(path, 'wb') as out:
- out.write(data)
-
-
-def get_template_path(template_dir, path):
- """Returns the template file which would be used to render the path.
-
- The path to the template file is returned.
- :param template_dir: the directory the templates are located in
- :param path: the file path to be written to.
- :returns: path to the template file
- """
- return os.path.join(template_dir, os.path.basename(path))
-
-
-def render_and_write(template_dir, path, context):
- """Renders the specified template into the file.
-
- :param template_dir: the directory to load the template from
- :param path: the path to write the templated contents to
- :param context: the parameters to pass to the rendering engine
- """
- env = Environment(loader=FileSystemLoader(template_dir))
- template_file = os.path.basename(path)
- template = env.get_template(template_file)
- log('Rendering from template: %s' % template.name, level=DEBUG)
- rendered_content = template.render(context)
- if not rendered_content:
- log("Render returned None - skipping '%s'" % path,
- level=WARNING)
- return
-
- write(path, rendered_content.encode('utf-8').strip())
- log('Wrote template %s' % path, level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
deleted file mode 100644
index a6743a4..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import grp
-import os
-import pwd
-import six
-import yaml
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-
-
-# Global settings cache. Since each hook fire entails a fresh module import it
-# is safe to hold this in memory and not risk missing config changes (since
-# they will result in a new hook fire and thus re-import).
-__SETTINGS__ = {}
-
-
-def _get_defaults(modules):
- """Load the default config for the provided modules.
-
- :param modules: stack modules config defaults to lookup.
- :returns: modules default config dictionary.
- """
- default = os.path.join(os.path.dirname(__file__),
- 'defaults/%s.yaml' % (modules))
- return yaml.safe_load(open(default))
-
-
-def _get_schema(modules):
- """Load the config schema for the provided modules.
-
- NOTE: this schema is intended to have 1-1 relationship with they keys in
- the default config and is used a means to verify valid overrides provided
- by the user.
-
- :param modules: stack modules config schema to lookup.
- :returns: modules default schema dictionary.
- """
- schema = os.path.join(os.path.dirname(__file__),
- 'defaults/%s.yaml.schema' % (modules))
- return yaml.safe_load(open(schema))
-
-
-def _get_user_provided_overrides(modules):
- """Load user-provided config overrides.
-
- :param modules: stack modules to lookup in user overrides yaml file.
- :returns: overrides dictionary.
- """
- overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
- 'hardening.yaml')
- if os.path.exists(overrides):
- log("Found user-provided config overrides file '%s'" %
- (overrides), level=DEBUG)
- settings = yaml.safe_load(open(overrides))
- if settings and settings.get(modules):
- log("Applying '%s' overrides" % (modules), level=DEBUG)
- return settings.get(modules)
-
- log("No overrides found for '%s'" % (modules), level=DEBUG)
- else:
- log("No hardening config overrides file '%s' found in charm "
- "root dir" % (overrides), level=DEBUG)
-
- return {}
-
-
-def _apply_overrides(settings, overrides, schema):
- """Get overrides config overlayed onto modules defaults.
-
- :param modules: require stack modules config.
- :returns: dictionary of modules config with user overrides applied.
- """
- if overrides:
- for k, v in six.iteritems(overrides):
- if k in schema:
- if schema[k] is None:
- settings[k] = v
- elif type(schema[k]) is dict:
- settings[k] = _apply_overrides(settings[k], overrides[k],
- schema[k])
- else:
- raise Exception("Unexpected type found in schema '%s'" %
- type(schema[k]), level=ERROR)
- else:
- log("Unknown override key '%s' - ignoring" % (k), level=INFO)
-
- return settings
-
-
-def get_settings(modules):
- global __SETTINGS__
- if modules in __SETTINGS__:
- return __SETTINGS__[modules]
-
- schema = _get_schema(modules)
- settings = _get_defaults(modules)
- overrides = _get_user_provided_overrides(modules)
- __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
- return __SETTINGS__[modules]
-
-
-def ensure_permissions(path, user, group, permissions, maxdepth=-1):
- """Ensure permissions for path.
-
- If path is a file, apply to file and return. If path is a directory,
- apply recursively (if required) to directory contents and return.
-
- :param user: user name
- :param group: group name
- :param permissions: octal permissions
- :param maxdepth: maximum recursion depth. A negative maxdepth allows
- infinite recursion and maxdepth=0 means no recursion.
- :returns: None
- """
- if not os.path.exists(path):
- log("File '%s' does not exist - cannot set permissions" % (path),
- level=WARNING)
- return
-
- _user = pwd.getpwnam(user)
- os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
- os.chmod(path, permissions)
-
- if maxdepth == 0:
- log("Max recursion depth reached - skipping further recursion",
- level=DEBUG)
- return
- elif maxdepth > 0:
- maxdepth -= 1
-
- if os.path.isdir(path):
- contents = glob.glob("%s/*" % (path))
- for c in contents:
- ensure_permissions(c, user=user, group=group,
- permissions=permissions, maxdepth=maxdepth)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index 6bba07b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,499 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network(s): %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- networks = network.split() or [network]
- for network in networks:
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def resolve_network_cidr(ip_address):
- '''
- Resolves the full address cidr of an ip_address based on
- configured network interfaces
- '''
- netmask = get_netmask_for_address(ip_address)
- return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any.
-
- :param iface: network interface on which address(es) are expected to
- be found.
- :param inet_type: inet address family
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :return: list of ip addresses
- """
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
-
- :param iface: network interface on which ipv6 address(es) are expected to
- be found.
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :param dynamic_only: only recognise dynamic addresses
- :return: list of ipv6 addresses
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4 address
- socket.inet_aton(address)
- return True
- except socket.error:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython')
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython")
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
-
-
-def port_has_listener(address, port):
- """
- Returns True if the address:port is open and being listened to,
- else False.
-
- @param address: an IP address or hostname
- @param port: integer port
-
- Note calls 'zc' via a subprocess shell
- """
- cmd = ['nc', '-z', address, str(port)]
- result = subprocess.call(cmd)
- return not(bool(result))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index ef77caf..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index d21c9c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef3bdcc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index c07b33d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1583 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call, CalledProcessError
-
-import six
-import yaml
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
- status_set,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
- pwgen,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import get_host_ip
-from charmhelpers.core.unitdata import kv
-
-try:
- import psutil
-except ImportError:
- apt_install('python-psutil', fatal=True)
- import psutil
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-class OSContextError(Exception):
- pass
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- log("config_flags is not in expected format (key=value)", level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- log("Invalid config value(s) at index %s" % (i), level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('haproxy-queue-timeout'):
- ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
-
- if config('haproxy-connect-timeout'):
- ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
-
- ctxt['stat_port'] = '8888'
-
- db = kv()
- ctxt['stat_password'] = db.get('stat-password')
- if not ctxt['stat_password']:
- ctxt['stat_password'] = db.set('stat-password',
- pwgen(32))
- db.flush()
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def midonet_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- midonet_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- mido_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'midonet',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': midonet_config}
-
- return mido_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
- elif self.plugin == 'midonet':
- ctxt.update(self.midonet_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class LibvirtConfigFlagsContext(OSContextGenerator):
- """
- This context provides support for extending
- the libvirt section through user-defined flags.
- """
- def __call__(self):
- ctxt = {}
- libvirt_flags = config('libvirt-flags')
- if libvirt_flags:
- ctxt['libvirt_flags'] = config_flags_parser(
- libvirt_flags)
- return ctxt
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_configuration': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from '
- 'subordinate_configuration setting from %s'
- % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s service'
- % (rid, service), level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s'
- % (rid, self.config_file), level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- # NOTE: use cpu_count if present (16.04 support)
- if hasattr(psutil, 'cpu_count'):
- return psutil.cpu_count()
- else:
- return psutil.NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- ctxt = {"workers": self.num_cpus * multiplier}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {normalized[port]: bridge for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.keys():
- ports = sorted(mappings.keys())
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- all_ports = set()
- # If any of ports is a vlan device, its underlying device must have
- # mtu applied first.
- for port in ports:
- for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
- lport = os.path.basename(lport)
- all_ports.add(lport.split('_')[1])
-
- all_ports = list(all_ports)
- all_ports.extend(ports)
- if mtu:
- ctxt["devs"] = '\\n'.join(all_ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- 'api_version':
- rdata.get('api_version') or '2.0',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
-
-
-class InternalEndpointContext(OSContextGenerator):
- """Internal endpoint context.
-
- This context provides the endpoint type used for communication between
- services e.g. between Nova and Cinder internally. Openstack uses Public
- endpoints by default so this allows admins to optionally use internal
- endpoints.
- """
- def __call__(self):
- return {'use_internal_endpoints': config('use-internal-endpoints')}
-
-
-class AppArmorContext(OSContextGenerator):
- """Base class for apparmor contexts."""
-
- def __init__(self):
- self._ctxt = None
- self.aa_profile = None
- self.aa_utils_packages = ['apparmor-utils']
-
- @property
- def ctxt(self):
- if self._ctxt is not None:
- return self._ctxt
- self._ctxt = self._determine_ctxt()
- return self._ctxt
-
- def _determine_ctxt(self):
- """
- Validate aa-profile-mode settings is disable, enforce, or complain.
-
- :return ctxt: Dictionary of the apparmor profile or None
- """
- if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
- ctxt = {'aa-profile-mode': config('aa-profile-mode')}
- else:
- ctxt = None
- return ctxt
-
- def __call__(self):
- return self.ctxt
-
- def install_aa_utils(self):
- """
- Install packages required for apparmor configuration.
- """
- log("Installing apparmor utils.")
- ensure_packages(self.aa_utils_packages)
-
- def manually_disable_aa_profile(self):
- """
- Manually disable an apparmor profile.
-
- If aa-profile-mode is set to disabled (default) this is required as the
- template has been written but apparmor is yet unaware of the profile
- and aa-disable aa-profile fails. Without this the profile would kick
- into enforce mode on the next service restart.
-
- """
- profile_path = '/etc/apparmor.d'
- disable_path = '/etc/apparmor.d/disable'
- if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
- os.symlink(os.path.join(profile_path, self.aa_profile),
- os.path.join(disable_path, self.aa_profile))
-
- def setup_aa_profile(self):
- """
- Setup an apparmor profile.
- The ctxt dictionary will contain the apparmor profile mode and
- the apparmor profile name.
- Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
- the apparmor profile.
- """
- self()
- if not self.ctxt:
- log("Not enabling apparmor Profile")
- return
- self.install_aa_utils()
- cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
- cmd.append(self.ctxt['aa-profile'])
- log("Setting up the apparmor profile for {} in {} mode."
- "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
- try:
- check_call(cmd)
- except CalledProcessError as e:
- # If aa-profile-mode is set to disabled (default) manual
- # disabling is required as the template has been written but
- # apparmor is yet unaware of the profile and aa-disable aa-profile
- # fails. If aa-disable learns to read profile files first this can
- # be removed.
- if self.ctxt['aa-profile-mode'] == 'disable':
- log("Manually disabling the apparmor profile for {}."
- "".format(self.ctxt['aa-profile']))
- self.manually_disable_aa_profile()
- return
- status_set('blocked', "Apparmor profile {} failed to be set to {}."
- "".format(self.ctxt['aa-profile'],
- self.ctxt['aa-profile-mode']))
- raise e
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index 0df0717..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
-
-typeset -i N_INSTANCES=0
-for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
-do
- N_INSTANCES=N_INSTANCES+1
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances ($N_INSTANCES) looking good"
-exit 0
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 3ebb532..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 532a1dc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
- network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
- resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'binding': 'public',
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'binding': 'internal',
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'binding': 'admin',
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured, or a Juju 2.0 extra-binding has been used.
-
- :param endpoint_type: Network endpoing type
- """
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- binding = ADDRESS_MAP[endpoint_type]['binding']
- clustered = is_clustered()
-
- if clustered and vips:
- if net_addr:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- # NOTE: endeavour to check vips against network space
- # bindings
- try:
- bound_cidr = resolve_network_cidr(
- network_get_primary_address(binding)
- )
- for vip in vips:
- if is_address_in_network(bound_cidr, vip):
- resolved_address = vip
- break
- except NotImplementedError:
- # If no net-splits configured and no support for extra
- # bindings/network spaces so we expect a single vip
- resolved_address = vips[0]
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- if net_addr:
- resolved_address = get_address_in_network(net_addr, fallback_addr)
- else:
- # NOTE: only try to use extra bindings if legacy network
- # configuration is not in use
- try:
- resolved_address = network_get_primary_address(binding)
- except NotImplementedError:
- resolved_address = fallback_addr
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index d057ea6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': ['plumgrid-lxc',
- 'iovisor-dkms'],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- },
- 'midonet': {
- 'config': '/etc/neutron/plugins/midonet/midonet.ini',
- 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package()],
- 'server_packages': ['neutron-server',
- 'python-neutron-plugin-midonet'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- if release >= 'kilo':
- plugins['midonet']['driver'] = (
- 'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if release >= 'liberty':
- plugins['midonet']['driver'] = (
- 'midonet.neutron.plugin_v1.MidonetPluginV2')
- plugins['midonet']['server_packages'].remove(
- 'python-neutron-plugin-midonet')
- plugins['midonet']['server_packages'].append(
- 'python-networking-midonet')
- plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
- plugins['plumgrid']['server_packages'].remove(
- 'neutron-plugin-plumgrid')
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of bridge:port.
-
- Returns dict of the form {port:bridge} where ports may be mac addresses or
- interface names.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index 33ceee2..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-
-[client]
-{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.iteritems() -%}
-{{ key }} = {{ value }}
-{% endfor -%}
-{%- endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm <juju@localhost>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index 32b6276..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-global
- log {{ local_host }} local0
- log {{ local_host }} local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
-{%- if haproxy_queue_timeout %}
- timeout queue {{ haproxy_queue_timeout }}
-{%- else %}
- timeout queue 5000
-{%- endif %}
-{%- if haproxy_connect_timeout %}
- timeout connect {{ haproxy_connect_timeout }}
-{%- else %}
- timeout connect 5000
-{%- endif %}
-{%- if haproxy_client_timeout %}
- timeout client {{ haproxy_client_timeout }}
-{%- else %}
- timeout client 30000
-{%- endif %}
-{%- if haproxy_server_timeout %}
- timeout server {{ haproxy_server_timeout }}
-{%- else %}
- timeout server 30000
-{%- endif %}
-
-listen stats
- bind {{ local_host }}:{{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:{{ stat_password }}
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6 -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index 6a92380..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,26 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index 6a92380..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 5dcebe7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,12 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
deleted file mode 100644
index 9356b2b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
+++ /dev/null
@@ -1,10 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-# Juno specific config (Bug #1557223)
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
deleted file mode 100644
index dd6f364..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
+++ /dev/null
@@ -1,12 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index e5e3cb1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 115cc4b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,1576 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-import itertools
-import functools
-
-import six
-import tempfile
-import traceback
-import uuid
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
- config,
- log as juju_log,
- charm_dir,
- DEBUG,
- INFO,
- related_units,
- relation_ids,
- relation_set,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr,
- is_ipv6,
- port_has_listener,
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- mounts,
- umount,
- service_running,
- service_pause,
- service_resume,
- restart_on_change_helper,
-)
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
- ('2016.1', 'mitaka'),
-])
-
-# The ugly duckling - must list releases oldest to newest
-SWIFT_CODENAMES = OrderedDict([
- ('diablo',
- ['1.4.3']),
- ('essex',
- ['1.4.8']),
- ('folsom',
- ['1.7.4']),
- ('grizzly',
- ['1.7.6', '1.7.7', '1.8.0']),
- ('havana',
- ['1.9.0', '1.9.1', '1.10.0']),
- ('icehouse',
- ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
- ('juno',
- ['2.0.0', '2.1.0', '2.2.0']),
- ('kilo',
- ['2.2.1', '2.2.2']),
- ('liberty',
- ['2.3.0', '2.4.0', '2.5.0']),
- ('mitaka',
- ['2.5.0', '2.6.0', '2.7.0']),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12.0', 'liberty'),
- ('13.0', 'mitaka'),
- ]),
- 'neutron-common': OrderedDict([
- ('7.0', 'liberty'),
- ('8.0', 'mitaka'),
- ('8.1', 'mitaka'),
- ]),
- 'cinder-common': OrderedDict([
- ('7.0', 'liberty'),
- ('8.0', 'mitaka'),
- ]),
- 'keystone': OrderedDict([
- ('8.0', 'liberty'),
- ('8.1', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
- 'horizon-common': OrderedDict([
- ('8.0', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5.0', 'liberty'),
- ('6.1', 'mitaka'),
- ]),
- 'heat-common': OrderedDict([
- ('5.0', 'liberty'),
- ('6.0', 'mitaka'),
- ]),
- 'glance-common': OrderedDict([
- ('11.0', 'liberty'),
- ('12.0', 'mitaka'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8.0', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_version_codename_swift(codename):
- '''Determine OpenStack version number of swift from codename.'''
- for k, v in six.iteritems(SWIFT_CODENAMES):
- if k == codename:
- return v[-1]
- e = 'Could not derive swift version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_swift_codename(version):
- '''Determine OpenStack codename that corresponds to swift version.'''
- codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
- if len(codenames) > 1:
- # If more than one release codename contains this version we determine
- # the actual codename based on the highest available install source.
- for codename in reversed(codenames):
- releases = UBUNTU_OPENSTACK_RELEASE
- release = [k for k, v in six.iteritems(releases) if codename in v]
- ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
- if codename in ret or release[0] in ret:
- return codename
- elif len(codenames) == 1:
- return codenames[0]
- return None
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- if 'swift' in pkg.name:
- # Fully x.y.z match for swift versions
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- else:
- # x.y match only for 20XX.X
- # and ignore patch level for other packages
- match = re.match('^(\d+)\.(\d+)', vers)
-
- if match:
- vers = match.group(0)
-
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- return get_swift_codename(vers)
- else:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- for cname, version in six.iteritems(vers_map):
- if cname == codename:
- return version[-1]
- else:
- vers_map = OPENSTACK_CODENAMES
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- key = keyid.strip()
- if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
- key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
- juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
- juju_log("Importing ASCII Armor PGP key", level=DEBUG)
- with tempfile.NamedTemporaryFile() as keyfile:
- with open(keyfile.name, 'w') as fd:
- fd.write(key)
- fd.write("\n")
-
- cmd = ['apt-key', 'add', keyfile.name]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
- else:
- juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
- juju_log("Importing PGP key from keyserver", level=DEBUG)
- cmd = ['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
-
-
-def get_source_and_pgp_key(input):
- """Look for a pgp key ID or ascii-armor key in the given input."""
- index = input.strip()
- index = input.rfind('|')
- if index < 0:
- return input, None
-
- key = input[index + 1:].strip('|')
- source = input[:index]
- return source, key
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- avail_vers = get_os_version_codename_swift(codename)
- else:
- avail_vers = get_os_version_install_source(src)
- apt.init()
- if "swift" in package:
- major_cur_vers = cur_vers.split('.', 1)[0]
- major_avail_vers = avail_vers.split('.', 1)[0]
- major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
- return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
- return apt.version_compare(avail_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- if config('vip'):
- vips = config('vip').split()
- for vip in vips:
- if vip and is_ipv6(vip):
- hosts.append(vip)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-requirements_dir = None
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-def git_clone_and_install(projects_yaml, core_project):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- depth = '1'
- if 'depth' in p.keys():
- depth = p['depth']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements):
- """
- Clone and install a single git repository.
- """
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(
- repo, dest=parent_dir, branch=branch, depth=depth)
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv)
- else:
- pip_install(repo_dir, venv=venv)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Set the state of the workload status for the charm.
-
- This calls _determine_os_workload_status() to get the new state, message
- and sets the status using status_set()
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _determine_os_workload_status(
- configs, required_interfaces, charm_func, services, ports)
- status_set(state, message)
-
-
-def _determine_os_workload_status(
- configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Determine the state of the workload status for the charm.
-
- This function returns the new workload status for the charm based
- on the state of the interfaces, the paused state and whether the
- services are actually running and any specified ports are open.
-
- This checks:
-
- 1. if the unit should be paused, that it is actually paused. If so the
- state is 'maintenance' + message, else 'broken'.
- 2. that the interfaces/relations are complete. If they are not then
- it sets the state to either 'broken' or 'waiting' and an appropriate
- message.
- 3. If all the relation data is set, then it checks that the actual
- services really are running. If not it sets the state to 'broken'.
-
- If everything is okay then the state returns 'active'.
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _ows_check_if_paused(services, ports)
-
- if state is None:
- state, message = _ows_check_generic_interfaces(
- configs, required_interfaces)
-
- if state != 'maintenance' and charm_func:
- # _ows_check_charm_func() may modify the state, message
- state, message = _ows_check_charm_func(
- state, message, lambda: charm_func(configs))
-
- if state is None:
- state, message = _ows_check_services_running(services, ports)
-
- if state is None:
- state = 'active'
- message = "Unit is ready"
- juju_log(message, 'INFO')
-
- return state, message
-
-
-def _ows_check_if_paused(services=None, ports=None):
- """Check if the unit is supposed to be paused, and if so check that the
- services/ports (if passed) are actually stopped/not being listened to.
-
- if the unit isn't supposed to be paused, just return None, None
-
- @param services: OPTIONAL services spec or list of service names.
- @param ports: OPTIONAL list of port numbers.
- @returns state, message or None, None
- """
- if is_unit_paused_set():
- state, message = check_actually_paused(services=services,
- ports=ports)
- if state is None:
- # we're paused okay, so set maintenance and return
- state = "maintenance"
- message = "Paused. Use 'resume' action to resume normal service."
- return state, message
- return None, None
-
-
-def _ows_check_generic_interfaces(configs, required_interfaces):
- """Check the complete contexts to determine the workload status.
-
- - Checks for missing or incomplete contexts
- - juju log details of missing required data.
- - determines the correct workload status
- - creates an appropriate message for status_set(...)
-
- if there are no problems then the function returns None, None
-
- @param configs: a templating.OSConfigRenderer() object
- @params required_interfaces: {generic_interface: [specific_interface], }
- @returns state, message or None, None
- """
- incomplete_rel_data = incomplete_relation_data(configs,
- required_interfaces)
- state = None
- message = None
- missing_relations = set()
- incomplete_relations = set()
-
- for generic_interface, relations_states in incomplete_rel_data.items():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface, relation_state in relations_states.items():
- if relation_state.get('related'):
- related_interface = interface
- missing_data = relation_state.get('missing_data')
- break
- # No relation ID for the generic_interface?
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- missing_relations.add(generic_interface)
- else:
- # Relation ID eists but no related unit
- if not missing_data:
- # Edge case - relation ID exists but departings
- _hook_name = hook_name()
- if (('departed' in _hook_name or 'broken' in _hook_name) and
- related_interface in _hook_name):
- state = 'blocked'
- missing_relations.add(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface),
- "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has"
- " no units in the relation."
- "".format(generic_interface, related_interface),
- "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in missing_relations:
- incomplete_relations.add(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
- """Run a custom check function for the charm to see if it wants to
- change the state. This is only run if not in 'maintenance' and
- tests to see if the new state is more important that the previous
- one determined by the interfaces/relations check.
-
- @param state: the previously determined state so far.
- @param message: the user orientated message so far.
- @param charm_func: a callable function that returns state, message
- @returns state, message strings.
- """
- if charm_func_with_configs:
- charm_state, charm_message = charm_func_with_configs()
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- charm_message = charm_message.replace("Incomplete relations: ",
- "")
- message = "{}, {}".format(message, charm_message)
- else:
- message = charm_message
- return state, message
-
-
-def _ows_check_services_running(services, ports):
- """Check that the services that should be running are actually running
- and that any ports specified are being listened to.
-
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: list of ports
- @returns state, message: strings or None, None
- """
- messages = []
- state = None
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, running = _check_running_services(services)
- if not all(running):
- messages.append(
- "Services not running that should be: {}"
- .format(", ".join(_filter_tuples(services_running, False))))
- state = 'blocked'
- # also verify that the ports that should be open are open
- # NB, that ServiceManager objects only OPTIONALLY have ports
- map_not_open, ports_open = (
- _check_listening_on_services_ports(services))
- if not all(ports_open):
- # find which service has missing ports. They are in service
- # order which makes it a bit easier.
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in map_not_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "Services with ports not open that should be: {}"
- .format(message))
- state = 'blocked'
-
- if ports is not None:
- # and we can also check ports which we don't know the service for
- ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
- if not all(ports_open_bools):
- messages.append(
- "Ports which should be open, but are not: {}"
- .format(", ".join([str(p) for p, v in ports_open
- if not v])))
- state = 'blocked'
-
- if state is not None:
- message = "; ".join(messages)
- return state, message
-
- return None, None
-
-
-def _extract_services_list_helper(services):
- """Extract a OrderedDict of {service: [ports]} of the supplied services
- for use by the other functions.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param services: see above
- @returns OrderedDict(service: [ports], ...)
- """
- if services is None:
- return {}
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = OrderedDict()
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s[s['service']] = s.get('ports', [])
- if isinstance(s, str):
- _s[s] = []
- return _s
-
-
-def _check_running_services(services):
- """Check that the services dict provided is actually running and provide
- a list of (service, boolean) tuples for each service.
-
- Returns both a zipped list of (service, boolean) and a list of booleans
- in the same order as the services.
-
- @param services: OrderedDict of strings: [ports], one for each service to
- check.
- @returns [(service, boolean), ...], : results for checks
- [boolean] : just the result of the service checks
- """
- services_running = [service_running(s) for s in services]
- return list(zip(services, services_running)), services_running
-
-
-def _check_listening_on_services_ports(services, test=False):
- """Check that the unit is actually listening (has the port open) on the
- ports that the service specifies are open. If test is True then the
- function returns the services with ports that are open rather than
- closed.
-
- Returns an OrderedDict of service: ports and a list of booleans
-
- @param services: OrderedDict(service: [port, ...], ...)
- @param test: default=False, if False, test for closed, otherwise open.
- @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
- """
- test = not(not(test)) # ensure test is True or False
- all_ports = list(itertools.chain(*services.values()))
- ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
- map_ports = OrderedDict()
- matched_ports = [p for p, opened in zip(all_ports, ports_states)
- if opened == test] # essentially opened xor test
- for service, ports in services.items():
- set_ports = set(ports).intersection(matched_ports)
- if set_ports:
- map_ports[service] = set_ports
- return map_ports, ports_states
-
-
-def _check_listening_on_ports_list(ports):
- """Check that the ports list given are being listened to
-
- Returns a list of ports being listened to and a list of the
- booleans.
-
- @param ports: LIST or port numbers.
- @returns [(port_num, boolean), ...], [boolean]
- """
- ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
- return zip(ports, ports_open), ports_open
-
-
-def _filter_tuples(services_states, state):
- """Return a simple list from a list of tuples according to the condition
-
- @param services_states: LIST of (string, boolean): service and running
- state.
- @param state: Boolean to match the tuple against.
- @returns [LIST of strings] that matched the tuple RHS.
- """
- return [s for s, b in services_states if b == state]
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
- """Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if anyone of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = [
- svc_type
- for svc_type, interfaces in required_interfaces.items()
- if not set(interfaces).intersection(complete_ctxts)]
- return {
- i: configs.get_incomplete_context_data(required_interfaces[i])
- for i in incomplete_relations}
-
-
-def do_action_openstack_upgrade(package, upgrade_callback, configs):
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed.
-
- @param package: package name for determining if upgrade available
- @param upgrade_callback: function callback to charm's upgrade function
- @param configs: templating object derived from OSConfigRenderer class
-
- @return: True if upgrade successful; False if upgrade failed or skipped
- """
- ret = False
-
- if git_install_requested():
- action_set({'outcome': 'installed from source, skipped upgrade.'})
- else:
- if openstack_upgrade_available(package):
- if config('action-managed-upgrade'):
- juju_log('Upgrading OpenStack release')
-
- try:
- upgrade_callback(configs=configs)
- action_set({'outcome': 'success, upgrade completed.'})
- ret = True
- except:
- action_set({'outcome': 'upgrade failed, see traceback.'})
- action_set({'traceback': traceback.format_exc()})
- action_fail('do_openstack_upgrade resulted in an '
- 'unexpected error')
- else:
- action_set({'outcome': 'action-managed-upgrade config is '
- 'False, skipped upgrade.'})
- else:
- action_set({'outcome': 'no upgrade available.'})
-
- return ret
-
-
-def remote_restart(rel_name, remote_service=None):
- trigger = {
- 'restart-trigger': str(uuid.uuid4()),
- }
- if remote_service:
- trigger['remote-service'] = remote_service
- for rid in relation_ids(rel_name):
- # This subordinate can be related to two seperate services using
- # different subordinate relations so only issue the restart if
- # the principle is conencted down the relation we think it is
- if related_units(relid=rid):
- relation_set(relation_id=rid,
- relation_settings=trigger,
- )
-
-
-def check_actually_paused(services=None, ports=None):
- """Check that services listed in the services object and and ports
- are actually closed (not listened to), to verify that the unit is
- properly paused.
-
- @param services: See _extract_services_list_helper
- @returns status, : string for status (None if okay)
- message : string for problem for status_set
- """
- state = None
- message = None
- messages = []
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, services_states = _check_running_services(services)
- if any(services_states):
- # there shouldn't be any running so this is a problem
- messages.append("these services running: {}"
- .format(", ".join(
- _filter_tuples(services_running, True))))
- state = "blocked"
- ports_open, ports_open_bools = (
- _check_listening_on_services_ports(services, True))
- if any(ports_open_bools):
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in ports_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "these service:ports are open: {}".format(message))
- state = 'blocked'
- if ports is not None:
- ports_open, bools = _check_listening_on_ports_list(ports)
- if any(bools):
- messages.append(
- "these ports which should be closed, but are open: {}"
- .format(", ".join([str(p) for p, v in ports_open if v])))
- state = 'blocked'
- if messages:
- message = ("Services should be paused but {}"
- .format(", ".join(messages)))
- return state, message
-
-
-def set_unit_paused():
- """Set the unit to a paused state in the local kv() store.
- This does NOT actually pause the unit
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', True)
-
-
-def clear_unit_paused():
- """Clear the unit from a paused state in the local kv() store
- This does NOT actually restart any services - it only clears the
- local state.
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', False)
-
-
-def is_unit_paused_set():
- """Return the state of the kv().get('unit-paused').
- This does NOT verify that the unit really is paused.
-
-    To help with units that don't have HookData() (testing):
-    if it raises an exception, return False
- """
- try:
- with unitdata.HookData()() as t:
- kv = t[0]
- # transform something truth-y into a Boolean.
- return not(not(kv.get('unit-paused')))
- except:
- return False
-
-
-def pause_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Pause a unit by stopping the services and setting 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have stopped and ports are no longer
- being listened to.
-
- An optional charm_func() can be called that can either raise an
-    Exception or return a non-None value to indicate that the unit
- didn't pause cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are stopped, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm pausing.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- stopped = service_pause(service)
- if not stopped:
- messages.append("{} didn't stop cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- set_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't pause: {}".format("; ".join(messages)))
-
-
-def resume_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Resume a unit by starting the services and clearning 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have started and ports are being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return non None to indicate that the unit
- didn't resume cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are started, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm resuming.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- started = service_resume(service)
- if not started:
- messages.append("{} didn't start cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- clear_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't resume: {}".format("; ".join(messages)))
-
-
-def make_assess_status_func(*args, **kwargs):
- """Creates an assess_status_func() suitable for handing to pause_unit()
- and resume_unit().
-
- This uses the _determine_os_workload_status(...) function to determine
- what the workload_status should be for the unit. If the unit is
- not in maintenance or active states, then the message is returned to
- the caller. This is so an action that doesn't result in either a
- complete pause or complete resume can signal failure with an action_fail()
- """
- def _assess_status_func():
- state, message = _determine_os_workload_status(*args, **kwargs)
- status_set(state, message)
- if state not in ['maintenance', 'active']:
- return message
- return None
-
- return _assess_status_func
-
-
-def pausable_restart_on_change(restart_map, stopstart=False,
- restart_functions=None):
- """A restart_on_change decorator that checks to see if the unit is
- paused. If it is paused then the decorated function doesn't fire.
-
- This is provided as a helper, as the @restart_on_change(...) decorator
- is in core.host, yet the openstack specific helpers are in this file
- (contrib.openstack.utils). Thus, this needs to be an optional feature
- for openstack charms (or charms that wish to use the openstack
- pause/resume type features).
-
- It is used as follows:
-
- from contrib.openstack.utils import (
- pausable_restart_on_change as restart_on_change)
-
- @restart_on_change(restart_map, stopstart=<boolean>)
- def some_hook(...):
- pass
-
- see core.utils.restart_on_change() for more details.
-
- @param f: the function to decorate
- @param restart_map: the restart map {conf_file: [services]}
- @param stopstart: DEFAULT false; whether to stop, start or just restart
- @returns decorator to use a restart_on_change with pausability
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- if is_unit_paused_set():
- return f(*args, **kwargs)
- # otherwise, normal restart_on_change functionality
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
deleted file mode 100644
index eafca44..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import six
-
-from charmhelpers.core.hookenv import relation_id as current_relation_id
-from charmhelpers.core.hookenv import (
- is_relation_made,
- relation_ids,
- relation_get as _relation_get,
- local_unit,
- relation_set as _relation_set,
- leader_get as _leader_get,
- leader_set,
- is_leader,
-)
-
-
-"""
-This helper provides functions to support use of a peer relation
-for basic key/value storage, with the added benefit that all storage
-can be replicated across peer units.
-
-Requirement to use:
-
-To use this, the "peer_echo()" method has to be called form the peer
-relation's relation-changed hook:
-
-@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
-def cluster_relation_changed():
- peer_echo()
-
-Once this is done, you can use peer storage from anywhere:
-
-@hooks.hook("some-hook")
-def some_hook():
- # You can store and retrieve key/values this way:
- if is_relation_made("cluster"): # from charmhelpers.core.hookenv
- # There are peers available so we can work with peer storage
- peer_store("mykey", "myvalue")
- value = peer_retrieve("mykey")
- print value
- else:
- print "No peers joind the relation, cannot share key/values :("
-"""
-
-
-def leader_get(attribute=None, rid=None):
- """Wrapper to ensure that settings are migrated from the peer relation.
-
- This is to support upgrading an environment that does not support
- Juju leadership election to one that does.
-
- If a setting is not extant in the leader-get but is on the relation-get
- peer rel, it is migrated and marked as such so that it is not re-migrated.
- """
- migration_key = '__leader_get_migrated_settings__'
- if not is_leader():
- return _leader_get(attribute=attribute)
-
- settings_migrated = False
- leader_settings = _leader_get(attribute=attribute)
- previously_migrated = _leader_get(attribute=migration_key)
-
- if previously_migrated:
- migrated = set(json.loads(previously_migrated))
- else:
- migrated = set([])
-
- try:
- if migration_key in leader_settings:
- del leader_settings[migration_key]
- except TypeError:
- pass
-
- if attribute:
- if attribute in migrated:
- return leader_settings
-
- # If attribute not present in leader db, check if this unit has set
- # the attribute in the peer relation
- if not leader_settings:
- peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
- rid=rid)
- if peer_setting:
- leader_set(settings={attribute: peer_setting})
- leader_settings = peer_setting
-
- if leader_settings:
- settings_migrated = True
- migrated.add(attribute)
- else:
- r_settings = _relation_get(unit=local_unit(), rid=rid)
- if r_settings:
- for key in set(r_settings.keys()).difference(migrated):
- # Leader setting wins
- if not leader_settings.get(key):
- leader_settings[key] = r_settings[key]
-
- settings_migrated = True
- migrated.add(key)
-
- if settings_migrated:
- leader_set(**leader_settings)
-
- if migrated and settings_migrated:
- migrated = json.dumps(list(migrated))
- leader_set(settings={migration_key: migrated})
-
- return leader_settings
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Attempt to use leader-set if supported in the current version of Juju,
- otherwise falls back on relation-set.
-
- Note that we only attempt to use leader-set if the provided relation_id is
- a peer relation id or no relation id is provided (in which case we assume
- we are within the peer relation context).
- """
- try:
- if relation_id in relation_ids('cluster'):
- return leader_set(settings=relation_settings, **kwargs)
- else:
- raise NotImplementedError
- except NotImplementedError:
- return _relation_set(relation_id=relation_id,
- relation_settings=relation_settings, **kwargs)
-
-
-def relation_get(attribute=None, unit=None, rid=None):
- """Attempt to use leader-get if supported in the current version of Juju,
- otherwise falls back on relation-get.
-
- Note that we only attempt to use leader-get if the provided rid is a peer
- relation id or no relation id is provided (in which case we assume we are
- within the peer relation context).
- """
- try:
- if rid in relation_ids('cluster'):
- return leader_get(attribute, rid)
- else:
- raise NotImplementedError
- except NotImplementedError:
- return _relation_get(attribute=attribute, rid=rid, unit=unit)
-
-
-def peer_retrieve(key, relation_name='cluster'):
- """Retrieve a named key from peer relation `relation_name`."""
- cluster_rels = relation_ids(relation_name)
- if len(cluster_rels) > 0:
- cluster_rid = cluster_rels[0]
- return relation_get(attribute=key, rid=cluster_rid,
- unit=local_unit())
- else:
- raise ValueError('Unable to detect'
- 'peer relation {}'.format(relation_name))
-
-
-def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
- inc_list=None, exc_list=None):
- """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
- inc_list = inc_list if inc_list else []
- exc_list = exc_list if exc_list else []
- peerdb_settings = peer_retrieve('-', relation_name=relation_name)
- matched = {}
- if peerdb_settings is None:
- return matched
- for k, v in peerdb_settings.items():
- full_prefix = prefix + delimiter
- if k.startswith(full_prefix):
- new_key = k.replace(full_prefix, '')
- if new_key in exc_list:
- continue
- if new_key in inc_list or len(inc_list) == 0:
- matched[new_key] = v
- return matched
-
-
-def peer_store(key, value, relation_name='cluster'):
- """Store the key/value pair on the named peer relation `relation_name`."""
- cluster_rels = relation_ids(relation_name)
- if len(cluster_rels) > 0:
- cluster_rid = cluster_rels[0]
- relation_set(relation_id=cluster_rid,
- relation_settings={key: value})
- else:
- raise ValueError('Unable to detect '
- 'peer relation {}'.format(relation_name))
-
-
-def peer_echo(includes=None, force=False):
- """Echo filtered attributes back onto the same relation for storage.
-
- This is a requirement to use the peerstorage module - it needs to be called
- from the peer relation's changed hook.
-
- If Juju leader support exists this will be a noop unless force is True.
- """
- try:
- is_leader()
- except NotImplementedError:
- pass
- else:
- if not force:
- return # NOOP if leader-election is supported
-
- # Use original non-leader calls
- relation_get = _relation_get
- relation_set = _relation_set
-
- rdata = relation_get()
- echo_data = {}
- if includes is None:
- echo_data = rdata.copy()
- for ex in ['private-address', 'public-address']:
- if ex in echo_data:
- echo_data.pop(ex)
- else:
- for attribute, value in six.iteritems(rdata):
- for include in includes:
- if include in attribute:
- echo_data[attribute] = value
- if len(echo_data) > 0:
- relation_set(relation_settings=echo_data)
-
-
-def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
- peer_store_fatal=False, relation_settings=None,
- delimiter='_', **kwargs):
- """Store passed-in arguments both in argument relation and in peer storage.
-
- It functions like doing relation_set() and peer_store() at the same time,
- with the same data.
-
- @param relation_id: the id of the relation to store the data on. Defaults
- to the current relation.
- @param peer_store_fatal: Set to True, the function will raise an exception
-    should the peer storage not be available."""
-
- relation_settings = relation_settings if relation_settings else {}
- relation_set(relation_id=relation_id,
- relation_settings=relation_settings,
- **kwargs)
- if is_relation_made(peer_relation_name):
- for key, value in six.iteritems(dict(list(kwargs.items()) +
- list(relation_settings.items()))):
- key_prefix = relation_id or current_relation_id()
- peer_store(key_prefix + delimiter + key,
- value,
- relation_name=peer_relation_name)
- else:
- if peer_store_fatal:
- raise ValueError('Unable to detect '
- 'peer relation {}'.format(peer_relation_name))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index a2411c3..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-import sys
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def pip_execute(*args, **kwargs):
- """Overriden pip_execute() to stop sys.path being changed.
-
- The act of importing main from the pip module seems to cause add wheels
- from the /usr/share/python-wheels which are installed by various tools.
- This function ensures that sys.path remains the same after the call is
- executed.
- """
- try:
- _path = sys.path
- try:
- from pip import main as _pip_execute
- except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as _pip_execute
- _pip_execute(*args, **kwargs)
- finally:
- sys.path = _path
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, constraints=None, **options):
- """Install a requirements file.
-
- :param constraints: Path to pip constraints file.
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
- """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- if constraints:
- command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
- else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index d008081..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,1206 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-import bisect
-import errno
-import hashlib
-import six
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
-powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
-
-
-def validator(value, valid_type, valid_range=None):
- """
- Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
- Example input:
- validator(value=1,
- valid_type=int,
- valid_range=[0, 2])
- This says I'm testing value=1. It must be an int inclusive in [0,2]
-
- :param value: The value to validate
- :param valid_type: The type that value should be.
- :param valid_range: A range of values that value can assume.
- :return:
- """
- assert isinstance(value, valid_type), "{} is not a {}".format(
- value,
- valid_type)
- if valid_range is not None:
- assert isinstance(valid_range, list), \
- "valid_range must be a list, was given {}".format(valid_range)
- # If we're dealing with strings
- if valid_type is six.string_types:
- assert value in valid_range, \
- "{} is not in the list {}".format(value, valid_range)
- # Integer, float should have a min and max
- else:
- if len(valid_range) != 2:
- raise ValueError(
- "Invalid valid_range list of {} for {}. "
- "List must be [min,max]".format(valid_range, value))
- assert value >= valid_range[0], \
- "{} is less than minimum allowed value of {}".format(
- value, valid_range[0])
- assert value <= valid_range[1], \
- "{} is greater than maximum allowed value of {}".format(
- value, valid_range[1])
-
-
-class PoolCreationError(Exception):
- """
- A custom error to inform the caller that a pool creation failed. Provides an error message
- """
-
- def __init__(self, message):
- super(PoolCreationError, self).__init__(message)
-
-
-class Pool(object):
- """
- An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
- Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
- """
-
- def __init__(self, service, name):
- self.service = service
- self.name = name
-
- # Create the pool if it doesn't exist already
- # To be implemented by subclasses
- def create(self):
- pass
-
- def add_cache_tier(self, cache_pool, mode):
- """
- Adds a new cache tier to an existing pool.
- :param cache_pool: six.string_types. The cache tier pool name to add.
- :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
- :return: None
- """
- # Check the input types and values
- validator(value=cache_pool, valid_type=six.string_types)
- validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
-
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
-
- def remove_cache_tier(self, cache_pool):
- """
- Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
- :param cache_pool: six.string_types. The cache tier pool name to remove.
- :return: None
- """
- # read-only is easy, writeback is much harder
- mode = get_cache_mode(self.service, cache_pool)
- version = ceph_version()
- if mode == 'readonly':
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- elif mode == 'writeback':
- pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
- 'cache-mode', cache_pool, 'forward']
- if version >= '10.1':
- # Jewel added a mandatory flag
- pool_forward_cmd.append('--yes-i-really-mean-it')
-
- check_call(pool_forward_cmd)
- # Flush the cache and wait for it to return
- check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- def get_pgs(self, pool_size):
- """
- :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
- erasure coded pools
- :return: int. The number of pgs to use.
- """
- validator(value=pool_size, valid_type=int)
- osd_list = get_osds(self.service)
- if not osd_list:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- return 200
-
- osd_list_length = len(osd_list)
- # Calculate based on Ceph best practices
- if osd_list_length < 5:
- return 128
- elif 5 < osd_list_length < 10:
- return 512
- elif 10 < osd_list_length < 50:
- return 4096
- else:
- estimate = (osd_list_length * 100) / pool_size
- # Return the next nearest power of 2
- index = bisect.bisect_right(powers_of_two, estimate)
- return powers_of_two[index]
-
-
-class ReplicatedPool(Pool):
- def __init__(self, service, name, pg_num=None, replicas=2):
- super(ReplicatedPool, self).__init__(service=service, name=name)
- self.replicas = replicas
- if pg_num is None:
- self.pg_num = self.get_pgs(self.replicas)
- else:
- self.pg_num = pg_num
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(self.pg_num)]
- try:
- check_call(cmd)
- # Set the pool replica size
- update_pool(client=self.service,
- pool=self.name,
- settings={'size': str(self.replicas)})
- except CalledProcessError:
- raise
-
-
-# Default jerasure erasure coded pool
-class ErasurePool(Pool):
- def __init__(self, service, name, erasure_code_profile="default"):
- super(ErasurePool, self).__init__(service=service, name=name)
- self.erasure_code_profile = erasure_code_profile
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Try to find the erasure profile information so we can properly size the pgs
- erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
-
- # Check for errors
- if erasure_profile is None:
- log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
- level=ERROR)
- raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
- if 'k' not in erasure_profile or 'm' not in erasure_profile:
- # Error
- log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
- level=ERROR)
- raise PoolCreationError(
- message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
-
- pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
- """Get an existing erasure code profile if it already exists.
- Returns json formatted output"""
-
-
-def get_mon_map(service):
- """
- Returns the current monitor map.
- :param service: six.string_types. The Ceph user name to run the command under
- :return: json string. :raise: ValueError if the monmap fails to parse.
- Also raises CalledProcessError if our ceph command fails
- """
- try:
- mon_status = check_output(
- ['ceph', '--id', service,
- 'mon_status', '--format=json'])
- try:
- return json.loads(mon_status)
- except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}".format(
- mon_status, v.message))
- raise
- except CalledProcessError as e:
- log("mon_status command failed with message: {}".format(
- e.message))
- raise
-
-
-def hash_monitor_names(service):
- """
- Uses the get_mon_map() function to get information about the monitor
- cluster.
- Hash the name of each monitor. Return a sorted list of monitor hashes
- in an ascending order.
- :param service: six.string_types. The Ceph user name to run the command under
- :rtype : dict. json dict of monitor name, ip address and rank
- example: {
- 'name': 'ip-172-31-13-165',
- 'rank': 0,
- 'addr': '172.31.13.165:6789/0'}
- """
- try:
- hash_list = []
- monitor_list = get_mon_map(service=service)
- if monitor_list['monmap']['mons']:
- for mon in monitor_list['monmap']['mons']:
- hash_list.append(
- hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
- return sorted(hash_list)
- else:
- return None
- except (ValueError, CalledProcessError):
- raise
-
-
-def monitor_key_delete(service, key):
- """
- Delete a key and value pair from the monitor cluster
- :param service: six.string_types. The Ceph user name to run the command under
- Deletes a key value pair on the monitor cluster.
- :param key: six.string_types. The key to delete.
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'del', str(key)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_set(service, key, value):
- """
- Sets a key value pair on the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to set.
- :param value: The value to set. This will be converted to a string
- before setting
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'put', str(key), str(value)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_get(service, key):
- """
- Gets the value of an existing key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for.
- :return: Returns the value of that key or None if not found.
- """
- try:
- output = check_output(
- ['ceph', '--id', service,
- 'config-key', 'get', str(key)])
- return output
- except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}".format(
- e.output))
- return None
-
-
-def monitor_key_exists(service, key):
- """
- Searches for the existence of a key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for
- :return: Returns True if the key exists, False if not and raises an
- exception if an unknown error occurs. :raise: CalledProcessError if
- an unknown error occurs
- """
- try:
- check_call(
- ['ceph', '--id', service,
- 'config-key', 'exists', str(key)])
- # I can return true here regardless because Ceph returns
- # ENOENT if the key wasn't found
- return True
- except CalledProcessError as e:
- if e.returncode == errno.ENOENT:
- return False
- else:
- log("Unknown error from ceph config-get exists: {} {}".format(
- e.returncode, e.output))
- raise
-
-
-def get_erasure_profile(service, name):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param name:
- :return:
- """
- try:
- out = check_output(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name, '--format=json'])
- return json.loads(out)
- except (CalledProcessError, OSError, ValueError):
- return None
-
-
-def pool_set(service, pool_name, key, value):
- """
- Sets a value for a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param key: six.string_types
- :param value:
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def snapshot_pool(service, pool_name, snapshot_name):
- """
- Snapshots a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_snapshot(service, pool_name, snapshot_name):
- """
- Remove a snapshot from a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param max_bytes: int or long
- :return: None. Can raise CalledProcessError
- """
- # Set a byte quota on a RADOS pool in ceph.
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
- 'max_bytes', str(max_bytes)]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_quota(service, pool_name):
- """
- Set a byte quota on a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_erasure_profile(service, profile_name):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
- profile_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
- failure_domain='host',
- data_chunks=2, coding_chunks=1,
- locality=None, durability_estimator=None):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :param erasure_plugin_name: six.string_types
- :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
- 'room', 'root', 'row'])
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
-
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
- 'ruleset_failure_domain=' + failure_domain]
- if locality is not None and durability_estimator is not None:
- raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
-
- # Add plugin specific information
- if locality is not None:
- # For local erasure codes
- cmd.append('l=' + str(locality))
- if durability_estimator is not None:
- # For Shec erasure codes
- cmd.append('c=' + str(durability_estimator))
-
- if erasure_profile_exists(service, profile_name):
- cmd.append('--force')
-
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def rename_pool(service, old_name, new_name):
- """
- Rename a Ceph pool from old_name to new_name
- :param service: six.string_types. The Ceph user name to run the command under
- :param old_name: six.string_types
- :param new_name: six.string_types
- :return: None
- """
- validator(value=old_name, valid_type=six.string_types)
- validator(value=new_name, valid_type=six.string_types)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
- check_call(cmd)
-
-
-def erasure_profile_exists(service, name):
- """
- Check to see if an Erasure code profile already exists.
- :param service: six.string_types. The Ceph user name to run the command under
- :param name: six.string_types
- :return: int or None
- """
- validator(value=name, valid_type=six.string_types)
- try:
- check_call(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name])
- return True
- except CalledProcessError:
- return False
-
-
-def get_cache_mode(service, pool_name):
- """
- Find the current caching mode of the pool_name given.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: int or None
- """
- validator(value=service, valid_type=six.string_types)
- validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
- try:
- osd_json = json.loads(out)
- for pool in osd_json['pools']:
- if pool['pool_name'] == pool_name:
- return pool['cache_mode']
- return None
- except ValueError:
- raise
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out.split()
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def update_pool(client, pool, settings):
- cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
- for k, v in six.iteritems(settings):
- cmd.append(k)
- cmd.append(v)
-
- check_call(cmd)
-
-
-def create_pool(service, name, replicas=3, pg_num=None):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- if not pg_num:
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pg_num = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pg_num = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
- check_call(cmd)
-
- update_pool(service, name, settings={'size': str(replicas)})
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes(relation='ceph'):
- """Query named relation to determine current nodes."""
- hosts = []
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping services {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- Returns False if no ceph key is available in relation state.
- """
- key = None
- for rid in relation_ids(relation):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- create_keyring(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s.%s' % (user, group), keyring])
-
- return True
-
-
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_create_pool(self, name, replica_count=3, pg_num=None):
- """Adds an operation to create a pool.
-
- @param pg_num setting: optional setting. If not provided, this value
- will be calculated by the broker based on how many OSDs are in the
- cluster at the time of creation. Note that, if provided, this value
- will be capped at the current available maximum.
- """
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count, 'pg_num': pg_num})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op', 'pg_num']:
- if self.ops[req_no].get(key) != other.ops[req_no].get(key):
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identity which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request, relation='ceph'):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- complete = []
- requests = {}
- for rid in relation_ids(relation):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similair request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similair request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'supports unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request, relation='ceph'):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request, relation=relation):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids(relation):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index 3a3f514..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- devs = [d.strip().split(' ') for d in
- check_output(cmd).splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
- If it a loopback device is not mapped to file, a new one will be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
-
-
-def is_mapped_loopback_device(device):
- """
- Checks if a given device name is an existing/mapped loopback device.
- :param device: str: Full path to the device (eg, /dev/loop1).
- :returns: str: Path to the backing file if is a loopback device
- empty string otherwise
- """
- return loopback_devices().get(device, "")
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 34b5f71..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume gruop associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
- p = Popen(['pvremove', '-ff', block_device],
- stdin=PIPE)
- p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 4e35c29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
- Clear a block device of partition table. Relies on sgdisk, which is
- installed as pat of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- try:
- out = check_output(['lsblk', '-P', device]).decode('UTF-8')
- except:
- return False
- return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/charms/trusty/ceilometer/charmhelpers/core/__init__.py b/charms/trusty/ceilometer/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/core/decorators.py b/charms/trusty/ceilometer/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/ceilometer/charmhelpers/core/files.py b/charms/trusty/ceilometer/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/fstab.py b/charms/trusty/ceilometer/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/ceilometer/charmhelpers/core/hookenv.py b/charms/trusty/ceilometer/charmhelpers/core/hookenv.py
deleted file mode 100644
index 0132129..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).strip()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/host.py b/charms/trusty/ceilometer/charmhelpers/core/host.py
deleted file mode 100644
index 64b2df5..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/host.py
+++ /dev/null
@@ -1,714 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = True
- if service_running(service_name):
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_running(service_name)
- if not started:
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name):
- """Determine whether a system service is running"""
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- output = subprocess.check_output(
- ['status', service_name],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running 'start/running'
- if "start/running" in output:
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
- primary_group=None, secondary_groups=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also Chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/hugepage.py b/charms/trusty/ceilometer/charmhelpers/core/hugepage.py
deleted file mode 100644
index a783ad9..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/ceilometer/charmhelpers/core/kernel.py b/charms/trusty/ceilometer/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py b/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/base.py b/charms/trusty/ceilometer/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py b/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 2423704..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/ceilometer/charmhelpers/core/strutils.py b/charms/trusty/ceilometer/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/sysctl.py b/charms/trusty/ceilometer/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/templating.py b/charms/trusty/ceilometer/charmhelpers/core/templating.py
deleted file mode 100644
index d2d8eaf..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/charms/trusty/ceilometer/charmhelpers/core/unitdata.py b/charms/trusty/ceilometer/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py b/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py
deleted file mode 100644
index db0d86a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,464 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py b/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index b8e0943..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py b/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index cafd27f..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- apt_install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if os.path.exists(dest):
- check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
- else:
- check_call(['bzr', 'branch', source, dest])
-
- def install(self, source, dest=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py b/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py
deleted file mode 100644
index 65ed531..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call, CalledProcessError
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-
-if filter_installed_packages(['git']) != []:
- apt_install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', depth])
- check_call(cmd)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer/charmhelpers/payload/__init__.py b/charms/trusty/ceilometer/charmhelpers/payload/__init__.py
deleted file mode 100644
index e6f4249..0000000
--- a/charms/trusty/ceilometer/charmhelpers/payload/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Tools for working with files injected into a charm just before deployment."
diff --git a/charms/trusty/ceilometer/charmhelpers/payload/execd.py b/charms/trusty/ceilometer/charmhelpers/payload/execd.py
deleted file mode 100644
index 4d4d81a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/payload/execd.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import subprocess
-from charmhelpers.core import hookenv
-
-
-def default_execd_dir():
- return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
-
-
-def execd_module_paths(execd_dir=None):
- """Generate a list of full paths to modules within execd_dir."""
- if not execd_dir:
- execd_dir = default_execd_dir()
-
- if not os.path.exists(execd_dir):
- return
-
- for subpath in os.listdir(execd_dir):
- module = os.path.join(execd_dir, subpath)
- if os.path.isdir(module):
- yield module
-
-
-def execd_submodule_paths(command, execd_dir=None):
- """Generate a list of full paths to the specified command within exec_dir.
- """
- for module_path in execd_module_paths(execd_dir):
- path = os.path.join(module_path, command)
- if os.access(path, os.X_OK) and os.path.isfile(path):
- yield path
-
-
-def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
- """Run command for each module within execd_dir which defines it."""
- for submodule_path in execd_submodule_paths(command, execd_dir):
- try:
- subprocess.check_call(submodule_path, shell=True, stderr=stderr)
- except subprocess.CalledProcessError as e:
- hookenv.log("Error ({}) running {}. Output: {}".format(
- e.returncode, e.cmd, e.output))
- if die_on_error:
- sys.exit(e.returncode)
-
-
-def execd_preinstall(execd_dir=None):
- """Run charm-pre-install for each module within execd_dir."""
- execd_run('charm-pre-install', execd_dir=execd_dir)
diff --git a/charms/trusty/ceilometer/config.yaml b/charms/trusty/ceilometer/config.yaml
deleted file mode 100644
index e97ad1c..0000000
--- a/charms/trusty/ceilometer/config.yaml
+++ /dev/null
@@ -1,194 +0,0 @@
-options:
- debug:
- default: False
- type: boolean
- description: Enable debug logging.
- verbose:
- default: False
- type: boolean
- description: Enable verbose logging.
- use-syslog:
- type: boolean
- default: False
- description: |
- Setting this to True will allow supporting services to log to syslog.
- openstack-origin:
- default: distro
- type: string
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
-
- Supported Cloud Archive sources include:
-
- cloud:<series>-<openstack-release>
- cloud:<series>-<openstack-release>/updates
- cloud:<series>-<openstack-release>/staging
- cloud:<series>-<openstack-release>/proposed
-
- For series=Precise we support cloud archives for openstack-release:
- * icehouse
-
- For series=Trusty we support cloud archives for openstack-release:
- * juno
- * kilo
- * ...
-
- NOTE: updating this setting to a source that is known to provide
- a later version of OpenStack will trigger a software upgrade.
-
- NOTE: when openstack-origin-git is specified, openstack specific
- packages will be installed from source rather than from the
- openstack-origin repository.
- region:
- default: RegionOne
- type: string
- description: OpenStack Region
- rabbit-user:
- default: ceilometer
- type: string
- description: Username to request access on rabbitmq-server.
- rabbit-vhost:
- default: openstack
- type: string
- description: RabbitMQ virtual host to request access on rabbitmq-server.
- ssl_cert:
- type: string
- default:
- description: |
- SSL certificate to install and use for API ports. Setting this value
- and ssl_key will enable reverse proxying, point Ceilometer's entry in the
- Keystone catalog to use https, and override any certficiate and key
- issued by Keystone (if it is configured to do so).
- ssl_key:
- type: string
- default:
- description: SSL key to use with certificate specified as ssl_cert.
- ssl_ca:
- type: string
- default:
- description: |
- SSL CA to use with the certificate and key provided - this is only
- required if you are providing a privately signed ssl_cert and ssl_key.
- nagios_context:
- default: "juju"
- type: string
- description: |
- Used by the nrpe-external-master subordinate charm.
- A string that will be prepended to instance name to set the host name
- in nagios. So for instance the hostname would be something like:
- juju-myservice-0
- If you're running multiple environments with the same services in them
- this allows you to differentiate between them.
- nagios_servicegroups:
- default: ""
- type: string
- description: |
- A comma-separated list of nagios servicegroups.
- If left empty, the nagios_context will be used as the servicegroup
- # Network configuration options
- # by default all access is over 'private-address'
- os-admin-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Admin network (e.g.
- 192.168.0.0/24)
-
- This network will be used for admin endpoints.
- os-internal-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Internal network (e.g.
- 192.168.0.0/24)
-
- This network will be used for internal endpoints.
- os-public-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Public network (e.g.
- 192.168.0.0/24)
-
- This network will be used for public endpoints.
- os-public-hostname:
- type: string
- default:
- description: |
- The hostname or address of the public endpoints created for ceilometer
- in the keystone identity provider.
-
- This value will be used for public endpoints. For example, an
- os-public-hostname set to 'ceilometer.example.com' with ssl enabled will
- create the following public endpoints for ceilometer:
-
- https://ceilometer.example.com:8777/
- # HA configuration settings
- vip:
- type: string
- default:
- description: |
- Virtual IP(s) to use to front API services in HA configuration.
-
- If multiple networks are being used, a VIP should be provided for each
- network, separated by spaces.
- ha-bindiface:
- type: string
- default: eth0
- description: |
- Default network interface on which HA cluster will bind to communication
- with the other members of the HA Cluster.
- ha-mcastport:
- type: int
- default: 5403
- description: |
- Default multicast port number that will be used to communicate between
- HA Cluster nodes.
- api-workers:
- type: int
- default: 1
- description: |
- Number of workers for Ceilometer API server. (>= Kilo).
- action-managed-upgrade:
- type: boolean
- default: False
- description: |
- If True enables openstack upgrades for this charm via juju actions.
- You will still need to set openstack-origin to the new repository but
- instead of an upgrade running automatically across all units, it will
- wait for you to execute the openstack-upgrade action for this charm on
- each unit. If False it will revert to existing behavior of upgrading
- all units on config change.
- haproxy-server-timeout:
- type: int
- default:
- description: |
- Server timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 30000ms is used.
- haproxy-client-timeout:
- type: int
- default:
- description: |
- Client timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 30000ms is used.
- haproxy-queue-timeout:
- type: int
- default:
- description: |
- Queue timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 5000ms is used.
- haproxy-connect-timeout:
- type: int
- default:
- description: |
- Connect timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 5000ms is used.
- harden:
- default:
- type: string
- description: |
- Apply system hardening. Supports a space-delimited list of modules
- to run. Supported modules currently include os, ssh, apache and mysql.
-
diff --git a/charms/trusty/ceilometer/copyright b/charms/trusty/ceilometer/copyright
deleted file mode 100644
index f65bac7..0000000
--- a/charms/trusty/ceilometer/copyright
+++ /dev/null
@@ -1,32 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2011, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Files: ocf/openstack/ceilometer-agent-central
-Copyright: Emilien Macchi
-License: Apache 2.0
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
diff --git a/charms/trusty/ceilometer/hardening.yaml b/charms/trusty/ceilometer/hardening.yaml
deleted file mode 100644
index 314bb38..0000000
--- a/charms/trusty/ceilometer/hardening.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-# Overrides file for contrib.hardening. See README.hardening in
-# contrib.hardening for info on how to use this file.
-ssh:
- server:
- use_pam: 'yes' # juju requires this
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-changed b/charms/trusty/ceilometer/hooks/amqp-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-departed b/charms/trusty/ceilometer/hooks/amqp-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-joined b/charms/trusty/ceilometer/hooks/amqp-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined b/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_contexts.py b/charms/trusty/ceilometer/hooks/ceilometer_contexts.py
deleted file mode 120000
index 6c03421..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_contexts.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_contexts.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_hooks.py b/charms/trusty/ceilometer/hooks/ceilometer_hooks.py
deleted file mode 100755
index 1fd861c..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_hooks.py
+++ /dev/null
@@ -1,366 +0,0 @@
-#!/usr/bin/python
-import base64
-import shutil
-import subprocess
-import sys
-import os
-
-from charmhelpers.fetch import (
- apt_install, filter_installed_packages,
- apt_update
-)
-from charmhelpers.core.hookenv import (
- open_port,
- relation_get,
- relation_set,
- relation_ids,
- related_units,
- config,
- Hooks, UnregisteredHookError,
- log,
- status_set,
-)
-from charmhelpers.core.host import (
- service_restart,
- lsb_release
-)
-from charmhelpers.contrib.openstack.utils import (
- configure_installation_source,
- openstack_upgrade_available,
- pausable_restart_on_change as restart_on_change,
- is_unit_paused_set,
-)
-from ceilometer_utils import (
- get_packages,
- CEILOMETER_DB,
- CEILOMETER_SERVICE,
- CEILOMETER_ROLE,
- register_configs,
- restart_map,
- services,
- get_ceilometer_context,
- get_shared_secret,
- do_openstack_upgrade,
- set_shared_secret,
- assess_status,
- configure_pipeline,
-)
-from ceilometer_contexts import CEILOMETER_PORT
-from charmhelpers.contrib.openstack.ip import (
- canonical_url,
- PUBLIC, INTERNAL, ADMIN
-)
-from charmhelpers.contrib.charmsupport import nrpe
-from charmhelpers.contrib.network.ip import (
- get_iface_for_address,
- get_netmask_for_address
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- get_hacluster_config,
- is_elected_leader
-)
-from charmhelpers.contrib.peerstorage import (
- peer_retrieve,
- peer_store,
-)
-from charmhelpers.payload.execd import execd_preinstall
-from charmhelpers.contrib.hardening.harden import harden
-
-hooks = Hooks()
-CONFIGS = register_configs()
-
-
-@hooks.hook('install.real')
-@harden()
-def install():
- execd_preinstall()
- origin = config('openstack-origin')
- if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
- origin = 'cloud:precise-grizzly'
- configure_installation_source(origin)
- packages = filter_installed_packages(get_packages())
- if packages:
- status_set('maintenance', 'Installing packages')
- apt_update(fatal=True)
- apt_install(packages, fatal=True)
- open_port(CEILOMETER_PORT)
-
-
-@hooks.hook("amqp-relation-joined")
-def amqp_joined():
- relation_set(username=config('rabbit-user'),
- vhost=config('rabbit-vhost'))
-
-
-@hooks.hook("shared-db-relation-joined")
-def db_joined():
- relation_set(ceilometer_database=CEILOMETER_DB)
-
-
-@hooks.hook("amqp-relation-changed",
- "shared-db-relation-changed",
- "shared-db-relation-departed")
-@restart_on_change(restart_map())
-def any_changed():
- CONFIGS.write_all()
- configure_https()
- ceilometer_joined()
-
-
-@hooks.hook("identity-service-relation-changed")
-@restart_on_change(restart_map())
-def identity_service_relation_changed():
- CONFIGS.write_all()
- configure_https()
- keystone_joined()
- ceilometer_joined()
-
-
-@hooks.hook("amqp-relation-departed")
-@restart_on_change(restart_map())
-def amqp_departed():
- if 'amqp' not in CONFIGS.complete_contexts():
- log('amqp relation incomplete. Peer not ready?')
- return
- CONFIGS.write_all()
-
-
-def configure_https():
- """Enables SSL API Apache config if appropriate."""
- # need to write all to ensure changes to the entire request pipeline
- # propagate (c-api, haprxy, apache)
- CONFIGS.write_all()
- if 'https' in CONFIGS.complete_contexts():
- cmd = ['a2ensite', 'openstack_https_frontend']
- subprocess.check_call(cmd)
- else:
- cmd = ['a2dissite', 'openstack_https_frontend']
- subprocess.check_call(cmd)
-
- # TODO: improve this by checking if local CN certs are available
- # first then checking reload status (see LP #1433114).
- if not is_unit_paused_set():
- try:
- subprocess.check_call(['service', 'apache2', 'reload'])
- except subprocess.CalledProcessError:
- subprocess.call(['service', 'apache2', 'restart'])
-
-
-@hooks.hook('config-changed')
-@restart_on_change(restart_map())
-@harden()
-def config_changed():
- if not config('action-managed-upgrade'):
- if openstack_upgrade_available('ceilometer-common'):
- status_set('maintenance', 'Upgrading to new OpenStack release')
- do_openstack_upgrade(CONFIGS)
- update_nrpe_config()
- CONFIGS.write_all()
- ceilometer_joined()
- configure_https()
- for rid in relation_ids('identity-service'):
- keystone_joined(relid=rid)
-
-
-@hooks.hook('upgrade-charm')
-@harden()
-def upgrade_charm():
- install()
- update_nrpe_config()
- any_changed()
-
-
-def install_ceilometer_ocf():
- dest_file = "/usr/lib/ocf/resource.d/openstack/ceilometer-agent-central"
- src_file = 'ocf/openstack/ceilometer-agent-central'
-
- if not os.path.isdir(os.path.dirname(dest_file)):
- os.makedirs(os.path.dirname(dest_file))
- if not os.path.exists(dest_file):
- shutil.copy(src_file, dest_file)
-
-
-@hooks.hook('cluster-relation-joined')
-@restart_on_change(restart_map(), stopstart=True)
-def cluster_joined():
- install_ceilometer_ocf()
-
- # If this node is the elected leader then share our secret with other nodes
- if is_elected_leader('grp_ceilometer_vips'):
- peer_store('shared_secret', get_shared_secret())
-
- CONFIGS.write_all()
-
-
-@hooks.hook('cluster-relation-changed',
- 'cluster-relation-departed')
-@restart_on_change(restart_map(), stopstart=True)
-def cluster_changed():
- shared_secret = peer_retrieve('shared_secret')
- if shared_secret is None or shared_secret.strip() == '':
- log('waiting for shared secret to be provided by leader')
- elif not shared_secret == get_shared_secret():
- set_shared_secret(shared_secret)
-
- CONFIGS.write_all()
-
-
-@hooks.hook('ha-relation-joined')
-def ha_joined():
- cluster_config = get_hacluster_config()
-
- resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central')
- }
-
- resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': 'op monitor interval="30s"'
- }
-
- amqp_ssl_port = None
- for rel_id in relation_ids('amqp'):
- for unit in related_units(rel_id):
- amqp_ssl_port = relation_get('ssl_port', unit, rel_id)
-
- if amqp_ssl_port:
- params = ('params amqp_server_port="%s" op monitor interval="30s"' %
- (amqp_ssl_port))
- resource_params['res_ceilometer_agent_central'] = params
-
- vip_group = []
- for vip in cluster_config['vip'].split():
- res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
- vip_params = 'ip'
-
- iface = get_iface_for_address(vip)
- if iface is not None:
- vip_key = 'res_ceilometer_{}_vip'.format(iface)
- resources[vip_key] = res_ceilometer_vip
- resource_params[vip_key] = (
- 'params {ip}="{vip}" cidr_netmask="{netmask}"'
- ' nic="{iface}"'.format(ip=vip_params,
- vip=vip,
- iface=iface,
- netmask=get_netmask_for_address(vip))
- )
- vip_group.append(vip_key)
-
- if len(vip_group) >= 1:
- relation_set(groups={'grp_ceilometer_vips': ' '.join(vip_group)})
-
- init_services = {
- 'res_ceilometer_haproxy': 'haproxy'
- }
- clones = {
- 'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
- }
- relation_set(init_services=init_services,
- corosync_bindiface=cluster_config['ha-bindiface'],
- corosync_mcastport=cluster_config['ha-mcastport'],
- resources=resources,
- resource_params=resource_params,
- clones=clones)
-
-
-@hooks.hook('ha-relation-changed')
-def ha_changed():
- clustered = relation_get('clustered')
- if not clustered or clustered in [None, 'None', '']:
- log('ha_changed: hacluster subordinate not fully clustered.')
- else:
- log('Cluster configured, notifying other services and updating '
- 'keystone endpoint configuration')
- for rid in relation_ids('identity-service'):
- keystone_joined(relid=rid)
-
-
-@hooks.hook("identity-service-relation-joined")
-def keystone_joined(relid=None):
- public_url = "{}:{}".format(
- canonical_url(CONFIGS, PUBLIC),
- CEILOMETER_PORT
- )
- admin_url = "{}:{}".format(
- canonical_url(CONFIGS, ADMIN),
- CEILOMETER_PORT
- )
- internal_url = "{}:{}".format(
- canonical_url(CONFIGS, INTERNAL),
- CEILOMETER_PORT
- )
- region = config("region")
- relation_set(relation_id=relid,
- service=CEILOMETER_SERVICE,
- public_url=public_url,
- admin_url=admin_url,
- internal_url=internal_url,
- requested_roles=CEILOMETER_ROLE,
- region=region)
-
-
-@hooks.hook('identity-notifications-relation-changed')
-def identity_notifications_changed():
- """Receive notifications from keystone."""
- notifications = relation_get()
- if not notifications:
- return
-
- # Some ceilometer services will create a client and request
- # the service catalog from keystone on startup. So if
- # endpoints change we need to restart these services.
- key = '%s-endpoint-changed' % (CEILOMETER_SERVICE)
- if key in notifications:
- service_restart('ceilometer-alarm-evaluator')
- service_restart('ceilometer-alarm-notifier')
-
-
-@hooks.hook("ceilometer-service-relation-joined")
-def ceilometer_joined():
- # Pass local context data onto related agent services
- context = get_ceilometer_context()
- # This value gets tranformed to a path by the context we need to
- # pass the data to agents.
- if 'rabbit_ssl_ca' in context:
- with open(context['rabbit_ssl_ca']) as fh:
- context['rabbit_ssl_ca'] = base64.b64encode(fh.read())
- for relid in relation_ids('ceilometer-service'):
- relation_set(relid, context)
-
-
-@hooks.hook('nrpe-external-master-relation-joined',
- 'nrpe-external-master-relation-changed')
-def update_nrpe_config():
- # python-dbus is used by check_upstart_job
- apt_install('python-dbus')
- hostname = nrpe.get_nagios_hostname()
- current_unit = nrpe.get_nagios_unit_name()
- nrpe_setup = nrpe.NRPE(hostname=hostname)
- nrpe.copy_nrpe_checks()
- nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
- nrpe.add_haproxy_checks(nrpe_setup, current_unit)
- nrpe_setup.write()
-
-
-@hooks.hook('update-status')
-@harden()
-def update_status():
- log('Updating status.')
-
-
-@hooks.hook('ceilometer-plugin-relation-changed')
-@hooks.hook('ceilometer-plugin-relation-departed')
-@hooks.hook('ceilometer-plugin-relation-broken')
-@restart_on_change(restart_map())
-def ceilometer_plugin_relation():
- configure_pipeline()
-
-
-if __name__ == '__main__':
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log('Unknown hook {} - skipping.'.format(e))
- assess_status(CONFIGS)
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_utils.py b/charms/trusty/ceilometer/hooks/ceilometer_utils.py
deleted file mode 120000
index e333253..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_utils.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_utils.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/charmhelpers b/charms/trusty/ceilometer/hooks/charmhelpers
deleted file mode 120000
index 702de73..0000000
--- a/charms/trusty/ceilometer/hooks/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-changed b/charms/trusty/ceilometer/hooks/cluster-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-departed b/charms/trusty/ceilometer/hooks/cluster-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-joined b/charms/trusty/ceilometer/hooks/cluster-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/config-changed b/charms/trusty/ceilometer/hooks/config-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ha-relation-changed b/charms/trusty/ceilometer/hooks/ha-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ha-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ha-relation-joined b/charms/trusty/ceilometer/hooks/ha-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ha-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed b/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-service-relation-changed b/charms/trusty/ceilometer/hooks/identity-service-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-service-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-service-relation-joined b/charms/trusty/ceilometer/hooks/identity-service-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-service-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/install b/charms/trusty/ceilometer/hooks/install
deleted file mode 100755
index 83a9d3c..0000000
--- a/charms/trusty/ceilometer/hooks/install
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
-# by default.
-
-declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml')
-
-check_and_install() {
- pkg="${1}-${2}"
- if ! dpkg -s ${pkg} 2>&1 > /dev/null; then
- apt-get -y install ${pkg}
- fi
-}
-
-PYTHON="python"
-
-for dep in ${DEPS[@]}; do
- check_and_install ${PYTHON} ${dep}
-done
-
-exec ./hooks/install.real
diff --git a/charms/trusty/ceilometer/hooks/install.real b/charms/trusty/ceilometer/hooks/install.real
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/install.real
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed b/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined b/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-changed b/charms/trusty/ceilometer/hooks/shared-db-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-departed b/charms/trusty/ceilometer/hooks/shared-db-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-joined b/charms/trusty/ceilometer/hooks/shared-db-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/start b/charms/trusty/ceilometer/hooks/start
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/stop b/charms/trusty/ceilometer/hooks/stop
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/update-status b/charms/trusty/ceilometer/hooks/update-status
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/update-status
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/upgrade-charm b/charms/trusty/ceilometer/hooks/upgrade-charm
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/icon.svg b/charms/trusty/ceilometer/icon.svg
deleted file mode 100644
index 84de61c..0000000
--- a/charms/trusty/ceilometer/icon.svg
+++ /dev/null
@@ -1,717 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- sodipodi:docname="openstack-ceilometer.svg"
- inkscape:version="0.48+devel r12825"
- version="1.1"
- id="svg6517"
- height="96"
- width="96">
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="2.7214607"
- inkscape:cy="63.792857"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1029"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="false"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false"
- inkscape:snap-global="true"
- inkscape:snap-bbox="true"
- inkscape:bbox-paths="true"
- inkscape:bbox-nodes="true"
- inkscape:snap-bbox-edge-midpoints="true"
- inkscape:snap-bbox-midpoints="true"
- inkscape:object-paths="true"
- inkscape:snap-intersection-paths="true"
- inkscape:object-nodes="true"
- inkscape:snap-smooth-nodes="true"
- inkscape:snap-midpoints="true"
- inkscape:snap-object-midpoints="true"
- inkscape:snap-center="true"
- inkscape:snap-grids="false"
- inkscape:snap-nodes="true"
- inkscape:snap-others="true">
- <inkscape:grid
- id="grid821"
- type="xygrid" />
- <sodipodi:guide
- id="guide823"
- position="18.34962,45.78585"
- orientation="1,0" />
- <sodipodi:guide
- id="guide827"
- position="78.02001,46.32673"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4184"
- position="65.586619,19.307"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4188"
- position="62.756032,71.583147"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4190"
- position="47.812194,78.049658"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- id="guide4194"
- position="25.60516,42.21665"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4202"
- position="25.60516,42.070975"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4204"
- position="25.60516,42.070975"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4242"
- position="51.81985,44.36226"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4252"
- position="73.5625,75.210937"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- inkscape:color="rgb(140,140,240)"
- id="guide4254"
- position="18.34962,75.472017"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4288"
- position="21.871042,21.577512"
- orientation="-0.70710678,-0.70710678" />
- </sodipodi:namedview>
- <defs
- id="defs6519">
- <filter
- id="filter1121"
- inkscape:label="Inner Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood1123"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.59999999999999998" />
- <feComposite
- id="feComposite1125"
- result="composite1"
- operator="out"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur1127"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset1129"
- result="offset"
- dy="2"
- dx="0" />
- <feComposite
- id="feComposite1131"
- result="composite2"
- operator="atop"
- in2="SourceGraphic"
- in="offset" />
- </filter>
- <filter
- id="filter950"
- inkscape:label="Drop Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood952"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.25" />
- <feComposite
- id="feComposite954"
- result="composite1"
- operator="in"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur956"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset958"
- result="offset"
- dy="1"
- dx="0" />
- <feComposite
- id="feComposite960"
- result="composite2"
- operator="over"
- in2="offset"
- in="SourceGraphic" />
- </filter>
- <filter
- inkscape:label="Badge Shadow"
- id="filter891"
- inkscape:collect="always">
- <feGaussianBlur
- id="feGaussianBlur893"
- stdDeviation="0.71999962"
- inkscape:collect="always" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3831">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.86309522"
- id="feGaussianBlur3833" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3868"
- x="-0.17186206"
- width="1.3437241"
- y="-0.1643077"
- height="1.3286154">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.62628186"
- id="feGaussianBlur3870" />
- </filter>
- <linearGradient
- id="linearGradient4328"
- inkscape:collect="always">
- <stop
- id="stop4330"
- offset="0"
- style="stop-color:#871f1c;stop-opacity:1;" />
- <stop
- id="stop4332"
- offset="1"
- style="stop-color:#651715;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="linearGradient902"
- inkscape:collect="always">
- <stop
- id="stop904"
- offset="0"
- style="stop-color:#cccccc;stop-opacity:1" />
- <stop
- id="stop906"
- offset="1"
- style="stop-color:#e6e6e6;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="Background">
- <stop
- style="stop-color:#22779e;stop-opacity:1"
- offset="0"
- id="stop4178" />
- <stop
- style="stop-color:#2991c0;stop-opacity:1"
- offset="1"
- id="stop4180" />
- </linearGradient>
- <clipPath
- id="clipPath873"
- clipPathUnits="userSpaceOnUse">
- <g
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- inkscape:label="Layer 1"
- id="g875"
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path877"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline" />
- </g>
- </clipPath>
- <style
- type="text/css"
- id="style867">
- .fil0 {fill:#1F1A17}
- </style>
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="635.29077"
- x2="-220"
- y1="731.29077"
- x1="-220"
- id="linearGradient908"
- xlink:href="#linearGradient902"
- inkscape:collect="always" />
- <clipPath
- id="clipPath16">
- <path
- d="m -9,-9 614,0 0,231 -614,0 0,-231 z"
- id="path18" />
- </clipPath>
- <clipPath
- id="clipPath116">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path118" />
- </clipPath>
- <clipPath
- id="clipPath128">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path130" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient3850">
- <stop
- style="stop-color:#000000;stop-opacity:1;"
- offset="0"
- id="stop3852" />
- <stop
- style="stop-color:#000000;stop-opacity:0;"
- offset="1"
- id="stop3854" />
- </linearGradient>
- <clipPath
- id="clipPath3095"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3097"
- d="m 976.648,389.551 -842.402,0 0,839.999 842.402,0 0,-839.999" />
- </clipPath>
- <clipPath
- id="clipPath3195"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3197"
- d="m 611.836,756.738 -106.34,105.207 c -8.473,8.289 -13.617,20.102 -13.598,33.379 L 598.301,790.207 c -0.031,-13.418 5.094,-25.031 13.535,-33.469" />
- </clipPath>
- <clipPath
- id="clipPath3235"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3237"
- d="m 1095.64,1501.81 c 35.46,-35.07 70.89,-70.11 106.35,-105.17 4.4,-4.38 7.11,-10.53 7.11,-17.55 l -106.37,105.21 c 0,7 -2.71,13.11 -7.09,17.51" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient4389">
- <stop
- style="stop-color:#871f1c;stop-opacity:1"
- offset="0"
- id="stop4391" />
- <stop
- style="stop-color:#c42e24;stop-opacity:1"
- offset="1"
- id="stop4393" />
- </linearGradient>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath4591">
- <path
- id="path4593"
- style="fill:#ff00ff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 1106.6009,730.43734 -0.036,21.648 c -0.01,3.50825 -2.8675,6.61375 -6.4037,6.92525 l -83.6503,7.33162 c -3.5205,0.30763 -6.3812,-2.29987 -6.3671,-5.8145 l 0.036,-21.6475 20.1171,-1.76662 -0.011,4.63775 c 0,1.83937 1.4844,3.19925 3.3262,3.0395 l 49.5274,-4.33975 c 1.8425,-0.166 3.3425,-1.78125 3.3538,-3.626 l 0.01,-4.63025 20.1,-1.7575"
- inkscape:connector-curvature="0" />
- </clipPath>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3856"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- gradientUnits="userSpaceOnUse" />
- <filter
- inkscape:collect="always"
- id="filter3885">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="5.7442192"
- id="feGaussianBlur3887" />
- </filter>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3895"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3902"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3904"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="23.383789"
- x2="25.217773"
- y1="27.095703"
- x1="21.505859"
- id="linearGradient4318"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="20.884073"
- x2="71.960243"
- y1="20.041777"
- x1="72.802544"
- id="linearGradient4326"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="74.246689"
- x2="21.69179"
- y1="73.643555"
- x1="22.294922"
- id="linearGradient4334"
- xlink:href="#linearGradient4328"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="24.881023"
- x2="57.450542"
- y1="77.404816"
- x1="57.450542"
- id="linearGradient4319"
- xlink:href="#linearGradient4552"
- inkscape:collect="always" />
- <linearGradient
- id="linearGradient4552"
- inkscape:collect="always">
- <stop
- id="stop4554"
- offset="0"
- style="stop-color:#d93023;stop-opacity:1" />
- <stop
- id="stop4556"
- offset="1"
- style="stop-color:#e63f46;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4444"
- x1="-228.90239"
- y1="694.04291"
- x2="-223.99701"
- y2="687.45367"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4469"
- x1="-216.46823"
- y1="693.81781"
- x2="-210.73438"
- y2="687.75952"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4479"
- x1="-206.06966"
- y1="682.03033"
- x2="-199.5918"
- y2="675.95483"
- gradientUnits="userSpaceOnUse" />
- </defs>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- style="display:inline"
- transform="translate(268,-635.29076)"
- id="layer1"
- inkscape:groupmode="layer"
- inkscape:label="BACKGROUND">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path6455"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- style="fill:url(#linearGradient908);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)" />
- <g
- id="g4336">
- <g
- transform="matrix(0.06790711,0,0,-0.06790711,-239.0411,765.68623)"
- id="g3897"
- xml:space="default">
- <path
- inkscape:connector-curvature="0"
- style="opacity:0.7;color:#000000;fill:url(#radialGradient3902);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3831);enable-background:accumulate"
- d="m -48.09375,67.8125 c -0.873996,-0.0028 -2.089735,0.01993 -3.40625,0.09375 -2.633031,0.147647 -5.700107,0.471759 -7.78125,1.53125 a 1.0001,1.0001 0 0 0 -0.25,1.59375 L -38.8125,92.375 a 1.0001,1.0001 0 0 0 0.84375,0.3125 L -24,90.5625 a 1.0001,1.0001 0 0 0 0.53125,-1.71875 L -46.0625,68.125 a 1.0001,1.0001 0 0 0 -0.625,-0.28125 c 0,0 -0.532254,-0.02842 -1.40625,-0.03125 z"
- transform="matrix(10.616011,0,0,-10.616011,357.98166,1725.8152)"
- id="path3821"
- xml:space="default" />
- <path
- style="opacity:0.6;color:#000000;fill:none;stroke:#000000;stroke-width:2.77429962;stroke-linecap:round;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3868);enable-background:accumulate"
- d="m -15.782705,81.725197 8.7458304,9.147937"
- id="path3858"
- inkscape:connector-curvature="0"
- transform="matrix(10.616011,0,0,-10.616011,39.50133,1725.8152)"
- xml:space="default" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;fill:url(#linearGradient3904);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3885);enable-background:accumulate;font-family:Sans;-inkscape-font-specification:Sans"
- d="m -95.18931,981.03569 a 10.617073,10.617073 0 0 1 -0.995251,-0.3318 l -42.795789,-5.308 a 10.617073,10.617073 0 0 1 -6.30326,-17.9145 L -4.2897203,812.5065 a 10.617073,10.617073 0 0 1 8.95726,-3.3175 l 49.0990503,7.63026 a 10.617073,10.617073 0 0 1 5.97151,17.91452 L -87.55905,978.04989 a 10.617073,10.617073 0 0 1 -7.63026,2.9858 z"
- id="path3874"
- inkscape:connector-curvature="0"
- xml:space="default" />
- </g>
- <path
- style="opacity:1;color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 20.697266 20.515625 C 19.336871 21.10204 18.348875 22.456253 18.345703 23.970703 L 18.351562 58.322266 L 28.798828 49.138672 C 28.677618 48.755492 28.614281 48.351433 28.613281 47.939453 C 28.613261 46.832713 29.052994 45.731919 29.833984 44.880859 C 30.614994 44.029779 31.672894 43.497091 32.777344 43.400391 C 34.625174 43.240001 36.250631 44.319081 36.769531 46.050781 L 42.583984 46.052734 C 42.966392 45.246167 43.574155 44.582164 44.298828 44.115234 L 20.697266 20.515625 z M 36.501953 49.099609 C 35.800103 50.580079 34.357634 51.603391 32.777344 51.744141 C 32.038304 51.807991 31.313171 51.674389 30.675781 51.355469 L 18.351562 62.191406 L 18.353516 69.601562 C 18.349848 70.477025 18.685456 71.239319 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.660156 79.126953 L 33.488281 71.738281 L 67.28125 68.777344 C 67.744386 68.736546 68.184049 68.603285 68.587891 68.404297 L 49.771484 49.589844 C 49.024593 50.774607 47.754946 51.625037 46.310547 51.751953 C 44.461497 51.913663 42.833613 50.834232 42.314453 49.101562 L 36.501953 49.099609 z "
- transform="translate(-268,635.29076)"
- id="path4308" />
- <path
- style="opacity:1;color:#000000;fill:#c42e24;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 67.320312 16.253906 L 21.822266 20.212891 C 21.426436 20.248017 21.052174 20.362637 20.697266 20.515625 L 44.298828 44.115234 C 44.9049 43.724723 45.592393 43.470157 46.3125 43.40625 C 46.79566 43.36596 47.274906 43.410199 47.728516 43.537109 L 53.324219 36.660156 C 53.017769 36.094616 52.857922 35.452456 52.857422 34.785156 C 52.857752 32.480326 54.723287 30.446511 57.023438 30.244141 C 59.119587 30.062831 60.885597 31.472453 61.148438 33.533203 L 70.771484 35.117188 L 70.771484 38.248047 L 70.775391 31.386719 L 77.232422 24.398438 L 69.892578 17.179688 L 69.884766 17.189453 C 69.251763 16.542736 68.342666 16.171306 67.320312 16.253906 z M 70.771484 38.248047 L 60.412109 36.541016 C 59.630809 37.708426 58.367804 38.472897 57.021484 38.591797 C 56.537844 38.632787 56.057726 38.589411 55.603516 38.462891 L 50.007812 45.337891 C 50.314462 45.903801 50.474339 46.547144 50.474609 47.214844 C 50.474197 48.071259 50.213409 48.888836 49.771484 49.589844 L 68.587891 68.404297 C 69.859183 67.777881 70.75673 66.462035 70.759766 65.015625 L 70.771484 38.248047 z "
- transform="translate(-268,635.29076)"
- id="path4233" />
- <rect
- xml:space="default"
- y="648.49109"
- x="-258.70667"
- height="69.20665"
- width="69.20665"
- id="rect3585-3"
- style="opacity:0.8;color:#000000;fill:none;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4318);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:5.25;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- d="M 22.029297 20.195312 L 21.822266 20.212891 C 19.919838 20.381715 18.370776 22.043134 18.349609 23.939453 L 24.662109 30.251953 L 25.605469 31.195312 L 25.605469 31.103516 C 25.609469 29.193966 27.168951 27.515473 29.082031 27.345703 L 29.171875 27.337891 L 28.373047 26.539062 L 22.029297 20.195312 z "
- transform="translate(-268,635.29076)"
- id="path4256" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.53600003;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4326);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 67.330078 16.253906 L 68.03125 16.955078 L 74.472656 23.396484 L 74.580078 23.386719 C 75.531927 23.309814 76.390588 23.620657 77.015625 24.185547 L 69.892578 17.179688 L 69.884766 17.189453 C 69.253843 16.544862 68.348328 16.174551 67.330078 16.253906 z M 77.054688 24.222656 C 77.115589 24.279686 77.164628 24.348282 77.220703 24.410156 L 77.232422 24.398438 L 77.054688 24.222656 z "
- transform="translate(-268,635.29076)"
- id="path4272" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4334);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:1.7;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 18.363281 69.712891 C 18.387957 70.540342 18.709001 71.264013 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.599609 79.068359 C 26.044831 78.550125 25.698241 77.821152 25.638672 76.988281 L 18.951172 70.298828 L 18.363281 69.712891 z M 26.636719 79.103516 L 26.660156 79.126953 L 26.664062 79.123047 C 26.655656 79.11562 26.645042 79.111033 26.636719 79.103516 z "
- transform="translate(-268,635.29076)"
- id="path4290" />
- <path
- style="opacity:1;color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.52901,697.37716 7.14034,7.23587 12.32422,-10.83594 -7.25977,-7.13086 z"
- id="path4428"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.64844,693.61303 7.25977,7.13085 5.06445,-14.09765 -7.65515,-5.41781 z"
- id="path4426"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 5.8125,0.002 -7.25977,-7.13086 z"
- id="path4430"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4469);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -225.68555,684.39232 7.25977,7.13091 -0.51715,5.74927 8.04214,0.24126 4.42318,-7.15751 -4.25676,-2.59674 -7.25782,-7.13086 z"
- id="path4446"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -210.73437,687.75951 -7.25782,-7.13086 5.59571,-6.875 7.25976,7.13086 z"
- id="path4432"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -207.58789,671.83178 7.25781,7.13281 10.36133,1.70508 -7.25977,-7.13086 z"
- id="path4434"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4444);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 -0.0125,5.42958 -5.81371,-3.17372 -7.25977,-7.13086 z"
- id="path4436"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4479);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -205.13672,680.88451 -7.25976,-7.13086 4.80859,-1.92187 7.25781,7.13281 -0.59845,5.11879 z"
- id="path4471"
- inkscape:connector-curvature="0" />
- <path
- style="fill:url(#linearGradient4319);fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:1"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.611328 65.453125 L 36.058594 56.269531 C 35.937384 55.886351 35.874047 55.482292 35.873047 55.070312 C 35.873027 53.963572 36.31276 52.862779 37.09375 52.011719 C 37.87476 51.160639 38.932659 50.62795 40.037109 50.53125 C 41.884939 50.37086 43.510397 51.449941 44.029297 53.181641 L 49.84375 53.183594 C 50.54598 51.702464 51.989182 50.677429 53.570312 50.537109 C 54.053473 50.496819 54.532718 50.541059 54.986328 50.667969 L 60.583984 43.792969 C 60.277534 43.227429 60.117687 42.583316 60.117188 41.916016 C 60.117518 39.611186 61.983053 37.579323 64.283203 37.376953 C 66.379353 37.195643 68.145363 38.603313 68.408203 40.664062 L 78.03125 42.248047 L 78.03125 45.378906 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 78.03125 45.378906 L 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.572275 53.03466 57.734105 53.678003 57.734375 54.345703 C 57.733265 56.649533 55.869342 58.680803 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.613281 76.734375 C 25.605281 78.643715 27.159736 80.061651 29.072266 79.894531 L 74.541016 75.910156 C 76.462106 75.740926 78.015531 74.054318 78.019531 72.148438 L 78.03125 45.378906 z "
- transform="translate(-268,635.29076)"
- id="path4633-5" />
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:0.3"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.605469 31.302734 C 25.609469 29.393184 27.168951 27.714692 29.082031 27.544922 L 74.580078 23.585938 C 76.495748 23.431162 78.042931 24.838676 78.041016 26.751953 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 60.125 42.041016 C 60.124551 42.066189 60.117191 42.089993 60.117188 42.115234 C 60.117651 42.734094 60.260989 43.33044 60.525391 43.865234 L 60.583984 43.792969 C 60.296842 43.263061 60.144234 42.663329 60.125 42.041016 z M 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.287847 52.509761 57.303524 52.553899 57.324219 52.595703 L 62.863281 45.792969 C 63.317491 45.919879 63.795657 45.962875 64.279297 45.921875 C 65.625617 45.802975 66.888622 45.040457 67.669922 43.873047 L 78.03125 45.578125 L 78.03125 45.378906 L 67.669922 43.673828 z M 57.726562 54.419922 C 57.68528 56.692433 55.844165 58.683016 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.611328 69.521484 L 37.935547 58.685547 C 38.572937 59.004467 39.298069 59.140072 40.037109 59.076172 C 41.617399 58.935422 43.059869 57.910157 43.761719 56.429688 L 49.574219 56.431641 C 50.093379 58.164311 51.721262 59.243741 53.570312 59.082031 C 55.869343 58.880021 57.733375 56.848752 57.734375 54.544922 C 57.734358 54.502688 57.727868 54.461948 57.726562 54.419922 z M 35.880859 55.181641 C 35.880238 55.211108 35.873046 55.24005 35.873047 55.269531 C 35.873907 55.623997 35.929222 55.970202 36.019531 56.304688 L 36.058594 56.269531 C 35.948415 55.921224 35.892049 55.554719 35.880859 55.181641 z "
- transform="translate(-268,635.29076)"
- id="path4481" />
- </g>
- <path
- sodipodi:nodetypes="ccccc"
- xml:space="default"
- inkscape:connector-curvature="0"
- id="path4181"
- d="m -99.97999,710.89598 0.0188,-52.54136 -52.43365,4.60327 0,52.52379 z"
- style="color:#000000;fill:#ffff00;fill-opacity:0.47839511;fill-rule:nonzero;stroke:none;stroke-width:1.70000005;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <rect
- xml:space="default"
- style="opacity:0.8;color:#000000;fill:#ff00ff;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="rect3908"
- width="69.206657"
- height="69.206657"
- x="279.50037"
- y="648.49109"
- transform="scale(-1,1)" />
- <path
- id="path100"
- d="m -113.71761,672.66709 c -2.30015,0.20237 -4.16481,2.23468 -4.16514,4.53951 5e-4,0.6673 0.16069,1.31071 0.46714,1.87625 0,0 -5.59737,6.87651 -5.59737,6.87651 -0.45361,-0.12691 -0.93271,-0.1712 -1.41587,-0.13091 -1.58113,0.14032 -3.02501,1.16568 -3.72724,2.64681 0,0 -5.81396,-0.002 -5.81396,-0.002 -0.5189,-1.7317 -2.14454,-2.81099 -3.99237,-2.6506 -1.10445,0.0967 -2.16372,0.62919 -2.94473,1.48027 -0.78099,0.85106 -1.21974,1.95097 -1.21972,3.05771 0.001,0.41198 0.0636,0.81614 0.18481,1.19932 0,0 -10.45278,9.18929 -10.45278,9.18929 0,0 0,3.8696 0,3.8696 0,0 12.3297,-10.84125 12.3297,-10.84125 0.63739,0.31892 1.36286,0.45336 2.1019,0.38951 1.58029,-0.14075 3.02321,-1.16576 3.72506,-2.64623 0,0 5.81233,0.002 5.81233,0.002 0.51916,1.73267 2.14635,2.81208 3.9954,2.65037 2.29903,-0.20201 4.16306,-2.23263 4.16417,-4.53646 -2.7e-4,-0.6677 -0.16047,-1.31155 -0.46712,-1.87746 0,0 5.59606,-6.87475 5.59606,-6.87475 0.45421,0.12652 0.93388,0.17026 1.41752,0.12927 1.34632,-0.1189 2.60939,-0.8825 3.39069,-2.04991 0,0 10.35996,1.70595 10.35996,1.70595 0,0 0.001,-3.13044 0.001,-3.13044 0,0 -9.62326,-1.58511 -9.62326,-1.58511 -0.26284,-2.06075 -2.03015,-3.46879 -4.1263,-3.28748 0,0 0,0 0,0"
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;color-interpolation:sRGB;color-interpolation-filters:linearRGB;fill:#000000;fill-opacity:1;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- inkscape:connector-curvature="0" />
- </g>
- <g
- style="display:inline"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- id="layer3"
- inkscape:groupmode="layer" />
- <g
- sodipodi:insensitive="true"
- style="display:none"
- inkscape:label="BADGE"
- id="layer2"
- inkscape:groupmode="layer">
- <g
- clip-path="none"
- id="g4394"
- transform="translate(-340.00001,-581)"
- style="display:inline">
- <g
- id="g855">
- <g
- style="opacity:0.6;filter:url(#filter891)"
- clip-path="url(#clipPath873)"
- id="g870"
- inkscape:groupmode="maskhelper">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path844"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)" />
- </g>
- <g
- id="g862">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4398"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4400"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.25,0,0,1.25,33,-100.45273)" />
- <path
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- inkscape:randomized="0"
- inkscape:rounded="0.1"
- inkscape:flatsided="false"
- sodipodi:arg2="1.6755161"
- sodipodi:arg1="1.0471976"
- sodipodi:r2="4.3458705"
- sodipodi:r1="7.2431178"
- sodipodi:cy="589.50385"
- sodipodi:cx="666.19574"
- sodipodi:sides="5"
- id="path4459"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="star" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/ceilometer/lib/ceilometer_contexts.py b/charms/trusty/ceilometer/lib/ceilometer_contexts.py
deleted file mode 100644
index 72aea39..0000000
--- a/charms/trusty/ceilometer/lib/ceilometer_contexts.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from charmhelpers.core.hookenv import (
- relation_ids,
- relation_get,
- related_units,
- config
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-from charmhelpers.contrib.openstack.context import (
- OSContextGenerator,
- context_complete,
- ApacheSSLContext as SSLContext,
-)
-
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port
-)
-
-CEILOMETER_DB = 'ceilometer'
-
-
-class LoggingConfigContext(OSContextGenerator):
- def __call__(self):
- return {'debug': config('debug'), 'verbose': config('verbose')}
-
-
-class MongoDBContext(OSContextGenerator):
- interfaces = ['mongodb']
-
- def __call__(self):
- mongo_servers = []
- replset = None
- use_replset = os_release('ceilometer-api') >= 'icehouse'
-
- for relid in relation_ids('shared-db'):
- rel_units = related_units(relid)
- use_replset = use_replset and (len(rel_units) > 1)
-
- for unit in rel_units:
- host = relation_get('hostname', unit, relid)
- port = relation_get('port', unit, relid)
-
- conf = {
- "db_host": host,
- "db_port": port,
- "db_name": CEILOMETER_DB
- }
-
- if not context_complete(conf):
- continue
-
- if not use_replset:
- return conf
-
- if replset is None:
- replset = relation_get('replset', unit, relid)
-
- mongo_servers.append('{}:{}'.format(host, port))
-
- if mongo_servers:
- return {
- 'db_mongo_servers': ','.join(mongo_servers),
- 'db_name': CEILOMETER_DB,
- 'db_replset': replset
- }
-
- return {}
-
-
-CEILOMETER_PORT = 8777
-
-
-class CeilometerContext(OSContextGenerator):
- def __call__(self):
- # Lazy-import to avoid a circular dependency in the imports
- from ceilometer_utils import get_shared_secret
-
- ctxt = {
- 'api_workers': config('api-workers'),
- 'port': CEILOMETER_PORT,
- 'metering_secret': get_shared_secret()
- }
- return ctxt
-
-
-class CeilometerServiceContext(OSContextGenerator):
- interfaces = ['ceilometer-service']
-
- def __call__(self):
- for relid in relation_ids('ceilometer-service'):
- for unit in related_units(relid):
- conf = relation_get(unit=unit, rid=relid)
- if context_complete(conf):
- return conf
- return {}
-
-
-class HAProxyContext(OSContextGenerator):
- interfaces = ['ceilometer-haproxy']
-
- def __call__(self):
- '''Extends the main charmhelpers HAProxyContext with a port mapping
- specific to this charm.
- '''
- haproxy_port = CEILOMETER_PORT
- api_port = determine_api_port(CEILOMETER_PORT, singlenode_mode=True)
- apache_port = determine_apache_port(CEILOMETER_PORT,
- singlenode_mode=True)
-
- ctxt = {
- 'service_ports': {'ceilometer_api': [haproxy_port, apache_port]},
- 'port': api_port
- }
- return ctxt
-
-
-class ApacheSSLContext(SSLContext):
-
- external_ports = [CEILOMETER_PORT]
- service_namespace = "ceilometer"
diff --git a/charms/trusty/ceilometer/lib/ceilometer_utils.py b/charms/trusty/ceilometer/lib/ceilometer_utils.py
deleted file mode 100644
index eca9c15..0000000
--- a/charms/trusty/ceilometer/lib/ceilometer_utils.py
+++ /dev/null
@@ -1,391 +0,0 @@
-import os
-import shutil
-import uuid
-
-from collections import OrderedDict
-
-import yaml
-
-from charmhelpers.contrib.openstack import (
- templating,
- context,
-)
-from ceilometer_contexts import (
- ApacheSSLContext,
- LoggingConfigContext,
- MongoDBContext,
- CeilometerContext,
- HAProxyContext,
- CEILOMETER_PORT,
-)
-from charmhelpers.contrib.openstack.utils import (
- get_os_codename_package,
- get_os_codename_install_source,
- configure_installation_source,
- pause_unit,
- resume_unit,
- make_assess_status_func,
-)
-from charmhelpers.core.hookenv import (
- config,
- log,
- related_units,
- relation_get,
- relation_ids,
-)
-from charmhelpers.fetch import apt_update, apt_install, apt_upgrade
-from copy import deepcopy
-
-HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
-CEILOMETER_CONF_DIR = "/etc/ceilometer"
-CEILOMETER_CONF = "%s/ceilometer.conf" % CEILOMETER_CONF_DIR
-CEILOMETER_PIPELINE_CONF = '%s/pipeline.yaml' % CEILOMETER_CONF_DIR
-CEILOMETER_PIPELINE_CONF_ORIG = '%s.orig' % CEILOMETER_PIPELINE_CONF
-HTTPS_APACHE_CONF = "/etc/apache2/sites-available/openstack_https_frontend"
-HTTPS_APACHE_24_CONF = "/etc/apache2/sites-available/" \
- "openstack_https_frontend.conf"
-CLUSTER_RES = 'grp_ceilometer_vips'
-
-CEILOMETER_BASE_SERVICES = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
-]
-
-ICEHOUSE_SERVICES = [
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'
-]
-
-MITAKA_SERVICES = [
- 'ceilometer-agent-notification'
-]
-
-CEILOMETER_DB = "ceilometer"
-CEILOMETER_SERVICE = "ceilometer"
-
-CEILOMETER_BASE_PACKAGES = [
- 'haproxy',
- 'apache2',
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'python-pymongo',
-]
-
-ICEHOUSE_PACKAGES = [
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'
-]
-
-MITAKA_PACKAGES = [
- 'ceilometer-agent-notification'
-]
-
-REQUIRED_INTERFACES = {
- 'database': ['mongodb'],
- 'messaging': ['amqp'],
- 'identity': ['identity-service'],
-}
-
-CEILOMETER_ROLE = "ResellerAdmin"
-SVC = 'ceilometer'
-
-CONFIG_FILES = OrderedDict([
- (CEILOMETER_CONF, {
- 'hook_contexts': [context.IdentityServiceContext(service=SVC,
- service_user=SVC),
- context.AMQPContext(ssl_dir=CEILOMETER_CONF_DIR),
- LoggingConfigContext(),
- MongoDBContext(),
- CeilometerContext(),
- context.SyslogContext(),
- HAProxyContext()],
- 'services': CEILOMETER_BASE_SERVICES
- }),
- (HAPROXY_CONF, {
- 'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
- HAProxyContext()],
- 'services': ['haproxy'],
- }),
- (HTTPS_APACHE_CONF, {
- 'hook_contexts': [ApacheSSLContext()],
- 'services': ['apache2'],
- }),
- (HTTPS_APACHE_24_CONF, {
- 'hook_contexts': [ApacheSSLContext()],
- 'services': ['apache2'],
- })
-])
-
-TEMPLATES = 'templates'
-
-JUJU_HEADER = '# [ WARNING ] config file maintained by Juju, local changes may be overwritten.'
-
-SHARED_SECRET = "/etc/ceilometer/secret.txt"
-
-
-def ordereddict_constructor(loader, node):
- return OrderedDict(loader.construct_pairs(node))
-
-
-def ordereddict_representer(dumper, data):
- return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
-
-
-yaml.add_constructor('tag:yaml.org,2002:map', ordereddict_constructor)
-yaml.add_representer(OrderedDict, ordereddict_representer)
-
-
-def register_configs():
- """
- Register config files with their respective contexts.
- Regstration of some configs may not be required depending on
- existing of certain relations.
- """
- # if called without anything installed (eg during install hook)
- # just default to earliest supported release. configs dont get touched
- # till post-install, anyway.
- release = get_os_codename_package('ceilometer-common', fatal=False) \
- or 'grizzly'
- configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
- openstack_release=release)
-
- for conf in CONFIG_FILES:
- configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
-
- if os.path.exists('/etc/apache2/conf-available'):
- configs.register(HTTPS_APACHE_24_CONF,
- CONFIG_FILES[HTTPS_APACHE_24_CONF]['hook_contexts'])
- else:
- configs.register(HTTPS_APACHE_CONF,
- CONFIG_FILES[HTTPS_APACHE_CONF]['hook_contexts'])
- return configs
-
-
-def restart_map():
- """
- Determine the correct resource map to be passed to
- charmhelpers.core.restart_on_change() based on the services configured.
-
- :returns: dict: A dictionary mapping config file to lists of services
- that should be restarted when file changes.
- """
- _map = {}
- for f, ctxt in CONFIG_FILES.iteritems():
- svcs = []
- for svc in ctxt['services']:
- svcs.append(svc)
- if f == CEILOMETER_CONF:
- for svc in ceilometer_release_services():
- svcs.append(svc)
- if svcs:
- _map[f] = svcs
-
- _map[CEILOMETER_PIPELINE_CONF] = CEILOMETER_BASE_SERVICES
-
- return _map
-
-
-def services():
- """ Returns a list of services associate with this charm """
- _services = []
- for v in restart_map().values():
- _services = _services + v
- return list(set(_services))
-
-
-def determine_ports():
- """Assemble a list of API ports for services the charm is managing
-
- @returns [ports] - list of ports that the charm manages.
- """
- # TODO(ajkavanagh) - determine what other ports the service listens on
- # apart from the main CEILOMETER port
- ports = [CEILOMETER_PORT]
- return ports
-
-
-def get_ceilometer_context():
- """ Retrieve a map of all current relation data for agent configuration """
- ctxt = {}
- for hcontext in CONFIG_FILES[CEILOMETER_CONF]['hook_contexts']:
- ctxt.update(hcontext())
- return ctxt
-
-
-def do_openstack_upgrade(configs):
- """
- Perform an upgrade. Takes care of upgrading packages, rewriting
- configs, database migrations and potentially any other post-upgrade
- actions.
-
- :param configs: The charms main OSConfigRenderer object.
- """
- new_src = config('openstack-origin')
- new_os_rel = get_os_codename_install_source(new_src)
-
- log('Performing OpenStack upgrade to %s.' % (new_os_rel))
-
- configure_installation_source(new_src)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- apt_update(fatal=True)
- apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
- apt_install(packages=get_packages(),
- options=dpkg_opts,
- fatal=True)
-
- # set CONFIGS to load templates from new release
- configs.set_release(openstack_release=new_os_rel)
-
-
-def ceilometer_release_services():
- codename = get_os_codename_install_source(config('openstack-origin'))
- if codename >= 'mitaka':
- return MITAKA_SERVICES
- elif codename >= 'icehouse':
- return ICEHOUSE_SERVICES
- else:
- return []
-
-
-def ceilometer_release_packages():
- codename = get_os_codename_install_source(config('openstack-origin'))
- if codename >= 'mitaka':
- return MITAKA_PACKAGES
- elif codename >= 'icehouse':
- return ICEHOUSE_PACKAGES
- else:
- return []
-
-
-def get_packages():
- packages = (deepcopy(CEILOMETER_BASE_PACKAGES) +
- ceilometer_release_packages())
- return packages
-
-
-def get_shared_secret():
- """
- Returns the current shared secret for the ceilometer node. If the shared
- secret does not exist, this method will generate one.
- """
- secret = None
- if not os.path.exists(SHARED_SECRET):
- secret = str(uuid.uuid4())
- set_shared_secret(secret)
- else:
- with open(SHARED_SECRET, 'r') as secret_file:
- secret = secret_file.read().strip()
- return secret
-
-
-def set_shared_secret(secret):
- """
- Sets the shared secret which is used to sign ceilometer messages.
-
- :param secret: the secret to set
- """
- with open(SHARED_SECRET, 'w') as secret_file:
- secret_file.write(secret)
-
-
-def assess_status(configs):
- """Assess status of current unit
-
- Decides what the state of the unit should be based on the current
- configuration.
-
- SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
- status of the unit.
- Also calls status_set(...) directly if paused state isn't complete.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- assess_status_func(configs)()
-
-
-def assess_status_func(configs):
- """Helper function to create the function that will assess_status() for
- the unit.
- Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
- create the appropriate status function and then returns it.
- Used directly by assess_status() and also for pausing and resuming
- the unit.
-
- @param configs: a templating.OSConfigRenderer() object
- @return f() -> None : a function that assesses the unit's workload status
- """
- return make_assess_status_func(
- configs, REQUIRED_INTERFACES,
- services=services(), ports=determine_ports())
-
-
-def pause_unit_helper(configs):
- """Helper function to pause a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(pause_unit, configs)
-
-
-def resume_unit_helper(configs):
- """Helper function to resume a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(resume_unit, configs)
-
-
-def _pause_resume_helper(f, configs):
- """Helper function that uses the make_assess_status_func(...) from
- charmhelpers.contrib.openstack.utils to create an assess_status(...)
- function that can be used with the pause/resume of the unit
-
- @param f: the function to be used with the assess_status(...) function
- @returns None - this function is executed for its side-effect
- """
- # TODO(ajkavanagh) - ports= has been left off because of the race hazard
- # that exists due to service_start()
- f(assess_status_func(configs),
- services=services(),
- ports=determine_ports())
-
-
-def configure_pipeline():
- if not os.path.exists(CEILOMETER_PIPELINE_CONF_ORIG):
- shutil.copy(CEILOMETER_PIPELINE_CONF,
- CEILOMETER_PIPELINE_CONF_ORIG)
- with open(CEILOMETER_PIPELINE_CONF_ORIG) as f:
- conf = yaml.load(f)
-
- sources = conf['sources']
- sinks = conf['sinks']
- for rid in relation_ids('ceilometer-plugin'):
- for unit in related_units(rid):
- srcs = relation_get('meter-sources', unit, rid)
- if srcs:
- srcs = yaml.load(srcs)
- sources.extend(srcs)
- sks = relation_get('meter-sinks', unit, rid)
- if sks:
- sks = yaml.load(sks)
- sinks.extend(sks)
-
- log('Writing config %s' % CEILOMETER_PIPELINE_CONF)
- with open(CEILOMETER_PIPELINE_CONF, 'w') as f:
- f.write(JUJU_HEADER)
- f.write('\n')
- yaml.dump(conf, f, default_flow_style=False, explicit_start=True)
diff --git a/charms/trusty/ceilometer/metadata.yaml b/charms/trusty/ceilometer/metadata.yaml
deleted file mode 100644
index 6b594a2..0000000
--- a/charms/trusty/ceilometer/metadata.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: ceilometer
-summary: OpenStack Telemetry
-maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
-description: |
- Ceilometer project aims to become the infrastructure to collect measurements
- within OpenStack so that no two agents would need to be written to collect
- the same data. It's primary targets are monitoring and metering, but the
- framework should be easily expandable to collect for other needs. To that
- effect, Ceilometer should be able to share collected data with a variety
- of consumers.
-tags:
- - openstack
- - telemetry
- - misc
-extra-bindings:
- public:
- admin:
- internal:
-provides:
- nrpe-external-master:
- interface: nrpe-external-master
- scope: container
- ceilometer-service:
- interface: ceilometer
-requires:
- shared-db:
- interface: mongodb
- amqp:
- interface: rabbitmq
- identity-service:
- interface: keystone
- identity-notifications:
- interface: keystone-notifications
- ha:
- interface: hacluster
- scope: container
- ceilometer-plugin:
- interface: ceilometer-plugin
- scope: container
-peers:
- cluster:
- interface: ceilometer-ha
diff --git a/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central b/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central
deleted file mode 100755
index 9c460a3..0000000
--- a/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/bin/sh
-#
-#
-# OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-#
-# Description: Manages an OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) process as an HA resource
-#
-# Authors: Emilien Macchi
-# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han
-#
-# Support: openstack@lists.launchpad.net
-# License: Apache Software License (ASL) 2.0
-#
-#
-# See usage() function below for more details ...
-#
-# OCF instance parameters:
-# OCF_RESKEY_binary
-# OCF_RESKEY_config
-# OCF_RESKEY_user
-# OCF_RESKEY_pid
-# OCF_RESKEY_monitor_binary
-# OCF_RESKEY_amqp_server_port
-# OCF_RESKEY_additional_parameters
-#######################################################################
-# Initialization:
-
-: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
-. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
-
-#######################################################################
-
-# Fill in some defaults if no values are specified
-
-OCF_RESKEY_binary_default="ceilometer-agent-central"
-OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf"
-OCF_RESKEY_user_default="ceilometer"
-OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
-OCF_RESKEY_amqp_server_port_default="5672"
-
-: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
-: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
-: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
-: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
-: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}}
-
-#######################################################################
-
-usage() {
- cat <<UEND
- usage: $0 (start|stop|validate-all|meta-data|status|monitor)
-
- $0 manages an OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) process as an HA resource
-
- The 'start' operation starts the scheduler service.
- The 'stop' operation stops the scheduler service.
- The 'validate-all' operation reports whether the parameters are valid
- The 'meta-data' operation reports this RA's meta-data information
- The 'status' operation reports whether the scheduler service is running
- The 'monitor' operation reports whether the scheduler service seems to be working
-
-UEND
-}
-
-meta_data() {
- cat <<END
-<?xml version="1.0"?>
-<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="ceilometer-agent-central">
-<version>1.0</version>
-
-<longdesc lang="en">
-Resource agent for the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-May manage a ceilometer-agent-central instance or a clone set that
-creates a distributed ceilometer-agent-central cluster.
-</longdesc>
-<shortdesc lang="en">Manages the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)</shortdesc>
-<parameters>
-
-<parameter name="binary" unique="0" required="0">
-<longdesc lang="en">
-Location of the OpenStack Ceilometer Central Agent server binary (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent server binary (ceilometer-agent-central)</shortdesc>
-<content type="string" default="${OCF_RESKEY_binary_default}" />
-</parameter>
-
-<parameter name="config" unique="0" required="0">
-<longdesc lang="en">
-Location of the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) configuration file
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent (ceilometer-agent-central registry) config file</shortdesc>
-<content type="string" default="${OCF_RESKEY_config_default}" />
-</parameter>
-
-<parameter name="user" unique="0" required="0">
-<longdesc lang="en">
-User running OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) user</shortdesc>
-<content type="string" default="${OCF_RESKEY_user_default}" />
-</parameter>
-
-<parameter name="pid" unique="0" required="0">
-<longdesc lang="en">
-The pid file to use for this OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) instance
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) pid file</shortdesc>
-<content type="string" default="${OCF_RESKEY_pid_default}" />
-</parameter>
-
-<parameter name="amqp_server_port" unique="0" required="0">
-<longdesc lang="en">
-The listening port number of the AMQP server. Use for monitoring purposes
-</longdesc>
-<shortdesc lang="en">AMQP listening port</shortdesc>
-<content type="integer" default="${OCF_RESKEY_amqp_server_port_default}" />
-</parameter>
-
-
-<parameter name="additional_parameters" unique="0" required="0">
-<longdesc lang="en">
-Additional parameters to pass on to the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">Additional parameters for ceilometer-agent-central</shortdesc>
-<content type="string" />
-</parameter>
-
-</parameters>
-
-<actions>
-<action name="start" timeout="20" />
-<action name="stop" timeout="20" />
-<action name="status" timeout="20" />
-<action name="monitor" timeout="30" interval="20" />
-<action name="validate-all" timeout="5" />
-<action name="meta-data" timeout="5" />
-</actions>
-</resource-agent>
-END
-}
-
-#######################################################################
-# Functions invoked by resource manager actions
-
-ceilometer_agent_central_check_port() {
-# This function has been taken from the squid RA and improved a bit
-# The length of the integer must be 4
-# Examples of valid port: "1080", "0080"
-# Examples of invalid port: "1080bad", "0", "0000", ""
-
- local int
- local cnt
-
- int="$1"
- cnt=${#int}
- echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*'
-
- if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then
- ocf_log err "Invalid port number: $1"
- exit $OCF_ERR_CONFIGURED
- fi
-}
-
-ceilometer_agent_central_validate() {
- local rc
-
- check_binary $OCF_RESKEY_binary
- check_binary netstat
- ceilometer_agent_central_check_port $OCF_RESKEY_amqp_server_port
-
- # A config file on shared storage that is not available
- # during probes is OK.
- if [ ! -f $OCF_RESKEY_config ]; then
- if ! ocf_is_probe; then
- ocf_log err "Config $OCF_RESKEY_config doesn't exist"
- return $OCF_ERR_INSTALLED
- fi
- ocf_log_warn "Config $OCF_RESKEY_config not available during a probe"
- fi
-
- getent passwd $OCF_RESKEY_user >/dev/null 2>&1
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "User $OCF_RESKEY_user doesn't exist"
- return $OCF_ERR_INSTALLED
- fi
-
- true
-}
-
-ceilometer_agent_central_status() {
- local pid
- local rc
-
- if [ ! -f $OCF_RESKEY_pid ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) is not running"
- return $OCF_NOT_RUNNING
- else
- pid=`cat $OCF_RESKEY_pid`
- fi
-
- ocf_run -warn kill -s 0 $pid
- rc=$?
- if [ $rc -eq 0 ]; then
- return $OCF_SUCCESS
- else
- ocf_log info "Old PID file found, but OpenStack Ceilometer Central Agent (ceilometer-agent-central) is not running"
- return $OCF_NOT_RUNNING
- fi
-}
-
-ceilometer_agent_central_monitor() {
- local rc
- local pid
- local scheduler_amqp_check
-
- ceilometer_agent_central_status
- rc=$?
-
- # If status returned anything but success, return that immediately
- if [ $rc -ne $OCF_SUCCESS ]; then
- return $rc
- fi
-
- # Check the connections according to the PID.
- # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api)
- pid=`cat $OCF_RESKEY_pid`
- scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"`
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "Central Agent is not connected to the AMQP server : $rc"
- return $OCF_NOT_RUNNING
- fi
-
- ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-central) monitor succeeded"
- return $OCF_SUCCESS
-}
-
-ceilometer_agent_central_start() {
- local rc
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_SUCCESS ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) already running"
- return $OCF_SUCCESS
- fi
-
- # run the actual ceilometer-agent-central daemon. Don't use ocf_run as we're sending the tool's output
- # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
- su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
- $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid
-
- # Spin waiting for the server to come up.
- while true; do
- ceilometer_agent_central_monitor
- rc=$?
- [ $rc -eq $OCF_SUCCESS ] && break
- if [ $rc -ne $OCF_NOT_RUNNING ]; then
- ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-central) start failed"
- exit $OCF_ERR_GENERIC
- fi
- sleep 1
- done
-
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) started"
- return $OCF_SUCCESS
-}
-
-ceilometer_agent_central_stop() {
- local rc
- local pid
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_NOT_RUNNING ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) already stopped"
- return $OCF_SUCCESS
- fi
-
- # Try SIGTERM
- pid=`cat $OCF_RESKEY_pid`
- ocf_run kill -s TERM $pid
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-central) couldn't be stopped"
- exit $OCF_ERR_GENERIC
- fi
-
- # stop waiting
- shutdown_timeout=15
- if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
- shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
- fi
- count=0
- while [ $count -lt $shutdown_timeout ]; do
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_NOT_RUNNING ]; then
- break
- fi
- count=`expr $count + 1`
- sleep 1
- ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-central) still hasn't stopped yet. Waiting ..."
- done
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -ne $OCF_NOT_RUNNING ]; then
- # SIGTERM didn't help either, try SIGKILL
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) failed to stop after ${shutdown_timeout}s \
- using SIGTERM. Trying SIGKILL ..."
- ocf_run kill -s KILL $pid
- fi
-
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) stopped"
-
- rm -f $OCF_RESKEY_pid
-
- return $OCF_SUCCESS
-}
-
-#######################################################################
-
-case "$1" in
- meta-data) meta_data
- exit $OCF_SUCCESS;;
- usage|help) usage
- exit $OCF_SUCCESS;;
-esac
-
-# Anything except meta-data and help must pass validation
-ceilometer_agent_central_validate || exit $?
-
-# What kind of method was invoked?
-case "$1" in
- start) ceilometer_agent_central_start;;
- stop) ceilometer_agent_central_stop;;
- status) ceilometer_agent_central_status;;
- monitor) ceilometer_agent_central_monitor;;
- validate-all) ;;
- *) usage
- exit $OCF_ERR_UNIMPLEMENTED;;
-esac
diff --git a/charms/trusty/ceilometer/requirements.txt b/charms/trusty/ceilometer/requirements.txt
deleted file mode 100644
index 426002d..0000000
--- a/charms/trusty/ceilometer/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-PyYAML>=3.1.0
-simplejson>=2.2.0
-netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
-Jinja2>=2.6 # BSD License (3 clause)
-six>=1.9.0
-dnspython>=1.12.0
-psutil>=1.1.1,<2.0.0
diff --git a/charms/trusty/ceilometer/revision b/charms/trusty/ceilometer/revision
deleted file mode 100644
index c739b42..0000000
--- a/charms/trusty/ceilometer/revision
+++ /dev/null
@@ -1 +0,0 @@
-44
diff --git a/charms/trusty/ceilometer/setup.cfg b/charms/trusty/ceilometer/setup.cfg
deleted file mode 100644
index 3f7bd91..0000000
--- a/charms/trusty/ceilometer/setup.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[nosetests]
-verbosity=2
-with-coverage=1
-cover-erase=1
-cover-package=hooks
-
diff --git a/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf b/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf
deleted file mode 100644
index 4b6b6d1..0000000
--- a/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# icehouse
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-
-{% include "parts/rabbitmq" -%}
-
-[api]
-port = {{ port }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher_rpc]
-metering_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = {{ auth_protocol }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
diff --git a/charms/trusty/ceilometer/templates/kilo/ceilometer.conf b/charms/trusty/ceilometer/templates/kilo/ceilometer.conf
deleted file mode 100644
index da44948..0000000
--- a/charms/trusty/ceilometer/templates/kilo/ceilometer.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-# kilo
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-api_workers = {{ api_workers }}
-
-{% include "parts/rabbitmq" -%}
-
-[api]
-port = {{ port }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher_rpc]
-metering_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = {{ auth_protocol }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
diff --git a/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf b/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf
deleted file mode 100644
index d026133..0000000
--- a/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# mitaka
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-
-[api]
-port = {{ port }}
-workers = {{ api_workers }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher]
-telemetry_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_type = password
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-
-{% include "section-rabbitmq-oslo" %}
diff --git a/charms/trusty/ceilometer/templates/parts/rabbitmq b/charms/trusty/ceilometer/templates/parts/rabbitmq
deleted file mode 100644
index bbd0371..0000000
--- a/charms/trusty/ceilometer/templates/parts/rabbitmq
+++ /dev/null
@@ -1,21 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/test-requirements.txt b/charms/trusty/ceilometer/test-requirements.txt
deleted file mode 100644
index 4faf254..0000000
--- a/charms/trusty/ceilometer/test-requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-os-testr>=0.4.1
-charm-tools>=2.0.0
-requests==2.6.0
diff --git a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse b/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
deleted file mode 100755
index 0f5c590..0000000
--- a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on precise-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='precise',
- openstack='cloud:precise-icehouse',
- source='cloud:precise-updates/icehouse')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse b/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
deleted file mode 100755
index 8530390..0000000
--- a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/016-basic-trusty-juno b/charms/trusty/ceilometer/tests/016-basic-trusty-juno
deleted file mode 100755
index f1ca57d..0000000
--- a/charms/trusty/ceilometer/tests/016-basic-trusty-juno
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-juno."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-juno',
- source='cloud:trusty-updates/juno')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo b/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
deleted file mode 100755
index cc89564..0000000
--- a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-kilo."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-kilo',
- source='cloud:trusty-updates/kilo')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty b/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
deleted file mode 100755
index 18b9500..0000000
--- a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-liberty',
- source='cloud:trusty-updates/liberty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka b/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
deleted file mode 100755
index 06c0849..0000000
--- a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-mitaka',
- source='cloud:trusty-updates/mitaka')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/020-basic-wily-liberty b/charms/trusty/ceilometer/tests/020-basic-wily-liberty
deleted file mode 100755
index 7b5908f..0000000
--- a/charms/trusty/ceilometer/tests/020-basic-wily-liberty
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on wily-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='wily')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka b/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
deleted file mode 100755
index 1706607..0000000
--- a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on xenial-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='xenial')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/README b/charms/trusty/ceilometer/tests/README
deleted file mode 100644
index 79c5b06..0000000
--- a/charms/trusty/ceilometer/tests/README
+++ /dev/null
@@ -1,113 +0,0 @@
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-Reference: lp:openstack-charm-testing for full test bundles.
-
-A single topology and configuration is defined and deployed, once for each of
-the defined Ubuntu:OpenStack release combos. The ongoing goal is for this
-charm to always possess tests and combo definitions for all currently-supported
-release combinations of U:OS.
-
-test_* methods are called in lexical sort order, as with most runners. However,
-each individual test method should be idempotent and expected to pass regardless
-of run order or Ubuntu:OpenStack combo. When writing or modifying tests,
-ensure that every individual test is not dependent on another test_ method.
-
-Test naming convention, purely for code organization purposes:
- 1xx service and endpoint checks
- 2xx relation checks
- 3xx config checks
- 4xx functional checks
- 9xx restarts, config changes, actions and other final checks
-
-In order to run tests, charm-tools and juju must be installed:
- sudo add-apt-repository ppa:juju/stable
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer amulet
-
-Alternatively, tests may be exercised with proposed or development versions
-of juju and related tools:
-
- # juju proposed version
- sudo add-apt-repository ppa:juju/proposed
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
- # juju development version
- sudo add-apt-repository ppa:juju/devel
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
-Some tests may need to download files. If a web proxy server is required in
-the environment, the AMULET_HTTP_PROXY environment variable must be set and
-passed into the juju test command. This is unrelated to juju's http proxy
-settings or behavior.
-
-The following examples demonstrate different ways that tests can be executed.
-All examples are run from the charm's root directory.
-
- * To run all +x tests in the tests directory:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- make functional_test
-
- * To run the tests against a specific release combo as defined in tests/:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To run tests and keep the juju environment deployed after a failure:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To re-run a test module against an already deployed environment (one
- that was deployed by a previous call to 'juju test --set-e'):
-
- ./tests/015-basic-trusty-icehouse
-
- * Even with --set-e, `juju test` will tear down the deployment when all
- tests pass. The following work flow may be more effective when
- iterating on test writing.
-
- bzr branch lp:charms/trusty/foo
- cd foo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
-
- * There may be test definitions in the tests/ dir which are not set +x
- executable. This is generally true for deprecated releases, or for
- upcoming releases which are not yet validated and enabled. To enable
- and run these tests:
- bzr branch lp:charms/trusty/foo
- cd foo
- ls tests
- chmod +x tests/017-basic-trusty-kilo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/017-basic-trusty-kilo
-
-
-Additional notes:
-
- * Use DEBUG to turn on debug logging, use ERROR otherwise.
- u = OpenStackAmuletUtils(ERROR)
- u = OpenStackAmuletUtils(DEBUG)
-
- * To interact with the deployed environment:
- export OS_USERNAME=admin
- export OS_PASSWORD=openstack
- export OS_TENANT_NAME=admin
- export OS_REGION_NAME=RegionOne
- export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0
- keystone user-list
- glance image-list
diff --git a/charms/trusty/ceilometer/tests/basic_deployment.py b/charms/trusty/ceilometer/tests/basic_deployment.py
deleted file mode 100644
index 9769759..0000000
--- a/charms/trusty/ceilometer/tests/basic_deployment.py
+++ /dev/null
@@ -1,664 +0,0 @@
-import subprocess
-import amulet
-import json
-import time
-import ceilometerclient.v2.client as ceilo_client
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
- OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils,
- DEBUG,
- # ERROR
-)
-
-# Use DEBUG to turn on debug logging
-u = OpenStackAmuletUtils(DEBUG)
-
-
-class CeilometerBasicDeployment(OpenStackAmuletDeployment):
- """Amulet tests on a basic ceilometer deployment."""
-
- def __init__(self, series, openstack=None, source=None, stable=True):
- """Deploy the entire test environment."""
- super(CeilometerBasicDeployment, self).__init__(series, openstack,
- source, stable)
- self._add_services()
- self._add_relations()
- self._configure_services()
- self._deploy()
-
- u.log.info('Waiting on extended status checks...')
- exclude_services = ['mysql', 'mongodb']
- self._auto_wait_for_status(exclude_services=exclude_services)
-
- self._initialize_tests()
-
- def _add_services(self):
- """Add services
-
- Add the services that we're testing, where ceilometer is local,
- and the rest of the service are from lp branches that are
- compatible with the local charm (e.g. stable or next).
- """
- this_service = {'name': 'ceilometer'}
- other_services = [{'name': 'mysql'},
- {'name': 'rabbitmq-server'},
- {'name': 'keystone'},
- {'name': 'mongodb'},
- {'name': 'glance'}, # to satisfy workload status
- {'name': 'ceilometer-agent'},
- {'name': 'nova-compute'}]
- super(CeilometerBasicDeployment, self)._add_services(this_service,
- other_services)
-
- def _add_relations(self):
- """Add all of the relations for the services."""
- relations = {
- 'ceilometer:shared-db': 'mongodb:database',
- 'ceilometer:amqp': 'rabbitmq-server:amqp',
- 'ceilometer:identity-service': 'keystone:identity-service',
- 'ceilometer:identity-notifications': 'keystone:'
- 'identity-notifications',
- 'keystone:shared-db': 'mysql:shared-db',
- 'ceilometer:ceilometer-service': 'ceilometer-agent:'
- 'ceilometer-service',
- 'nova-compute:nova-ceilometer': 'ceilometer-agent:nova-ceilometer',
- 'nova-compute:shared-db': 'mysql:shared-db',
- 'nova-compute:amqp': 'rabbitmq-server:amqp',
- 'glance:identity-service': 'keystone:identity-service',
- 'glance:shared-db': 'mysql:shared-db',
- 'glance:amqp': 'rabbitmq-server:amqp',
- 'nova-compute:image-service': 'glance:image-service'
- }
- super(CeilometerBasicDeployment, self)._add_relations(relations)
-
- def _configure_services(self):
- """Configure all of the services."""
- keystone_config = {'admin-password': 'openstack',
- 'admin-token': 'ubuntutesting'}
- configs = {'keystone': keystone_config}
- super(CeilometerBasicDeployment, self)._configure_services(configs)
-
- def _get_token(self):
- return self.keystone.service_catalog.catalog['token']['id']
-
- def _initialize_tests(self):
- """Perform final initialization before tests get run."""
- # Access the sentries for inspecting service units
- self.ceil_sentry = self.d.sentry.unit['ceilometer/0']
- self.ceil_agent_sentry = self.d.sentry.unit['ceilometer-agent/0']
- self.mysql_sentry = self.d.sentry.unit['mysql/0']
- self.keystone_sentry = self.d.sentry.unit['keystone/0']
- self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
- self.mongodb_sentry = self.d.sentry.unit['mongodb/0']
- self.nova_sentry = self.d.sentry.unit['nova-compute/0']
- u.log.debug('openstack release val: {}'.format(
- self._get_openstack_release()))
- u.log.debug('openstack release str: {}'.format(
- self._get_openstack_release_string()))
-
- # Authenticate admin with keystone endpoint
- self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
- user='admin',
- password='openstack',
- tenant='admin')
-
- # Authenticate admin with ceilometer endpoint
- ep = self.keystone.service_catalog.url_for(service_type='metering',
- endpoint_type='publicURL')
- os_token = self.keystone.auth_token
- self.log.debug('Instantiating ceilometer client...')
- self.ceil = ceilo_client.Client(endpoint=ep, token=os_token)
-
- def _run_action(self, unit_id, action, *args):
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- command.extend(args)
- print("Running command: %s\n" % " ".join(command))
- output = subprocess.check_output(command)
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- action_id = data[u'Action queued with id']
- return action_id
-
- def _wait_on_action(self, action_id):
- command = ["juju", "action", "fetch", "--format=json", action_id]
- while True:
- try:
- output = subprocess.check_output(command)
- except Exception as e:
- print(e)
- return False
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- if data[u"status"] == "completed":
- return True
- elif data[u"status"] == "failed":
- return False
- time.sleep(2)
-
- def test_100_services(self):
- """Verify the expected services are running on the corresponding
- service units."""
- u.log.debug('Checking system services on units...')
-
- ceilometer_svcs = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification',
- ]
-
- if self._get_openstack_release() < self.trusty_mitaka:
- ceilometer_svcs.append('ceilometer-alarm-evaluator')
- ceilometer_svcs.append('ceilometer-alarm-notifier')
-
- service_names = {
- self.ceil_sentry: ceilometer_svcs,
- }
-
- ret = u.validate_services_by_name(service_names)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_110_service_catalog(self):
- """Verify that the service catalog endpoint data is valid."""
- u.log.debug('Checking keystone service catalog data...')
- endpoint_check = {
- 'adminURL': u.valid_url,
- 'id': u.not_null,
- 'region': 'RegionOne',
- 'publicURL': u.valid_url,
- 'internalURL': u.valid_url
- }
- expected = {
- 'metering': [endpoint_check],
- 'identity': [endpoint_check]
- }
- actual = self.keystone.service_catalog.get_endpoints()
-
- ret = u.validate_svc_catalog_endpoint_data(expected, actual)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_112_keystone_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking keystone api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- internal_port = public_port = '5000'
- admin_port = '35357'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Keystone endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_114_ceilometer_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking ceilometer api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- admin_port = internal_port = public_port = '8777'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Ceilometer endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_200_ceilometer_identity_relation(self):
- """Verify the ceilometer to keystone identity-service relation data"""
- u.log.debug('Checking ceilometer to keystone identity-service '
- 'relation data...')
- unit = self.ceil_sentry
- relation = ['identity-service', 'keystone:identity-service']
- ceil_ip = unit.relation('identity-service',
- 'keystone:identity-service')['private-address']
- ceil_endpoint = "http://%s:8777" % (ceil_ip)
-
- expected = {
- 'admin_url': ceil_endpoint,
- 'internal_url': ceil_endpoint,
- 'private-address': ceil_ip,
- 'public_url': ceil_endpoint,
- 'region': 'RegionOne',
- 'requested_roles': 'ResellerAdmin',
- 'service': 'ceilometer',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_201_keystone_ceilometer_identity_relation(self):
- """Verify the keystone to ceilometer identity-service relation data"""
- u.log.debug('Checking keystone:ceilometer identity relation data...')
- unit = self.keystone_sentry
- relation = ['identity-service', 'ceilometer:identity-service']
- id_relation = unit.relation('identity-service',
- 'ceilometer:identity-service')
- id_ip = id_relation['private-address']
- expected = {
- 'admin_token': 'ubuntutesting',
- 'auth_host': id_ip,
- 'auth_port': "35357",
- 'auth_protocol': 'http',
- 'private-address': id_ip,
- 'service_host': id_ip,
- 'service_password': u.not_null,
- 'service_port': "5000",
- 'service_protocol': 'http',
- 'service_tenant': 'services',
- 'service_tenant_id': u.not_null,
- 'service_username': 'ceilometer',
- }
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('keystone identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_202_keystone_ceilometer_identity_notes_relation(self):
- """Verify ceilometer to keystone identity-notifications relation"""
- u.log.debug('Checking keystone:ceilometer '
- 'identity-notifications relation data...')
-
- # Relation data may vary depending on timing of hooks and relations.
- # May be glance- or keystone- or another endpoint-changed value, so
- # check that at least one ???-endpoint-changed value exists.
- unit = self.keystone_sentry
- relation_data = unit.relation('identity-service',
- 'ceilometer:identity-notifications')
-
- expected = '-endpoint-changed'
- found = 0
- for key in relation_data.keys():
- if expected in key and relation_data[key]:
- found += 1
- u.log.debug('{}: {}'.format(key, relation_data[key]))
-
- if not found:
- message = ('keystone:ceilometer identity-notification relation '
- 'error\n expected something like: {}\n actual: '
- '{}'.format(expected, relation_data))
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_203_ceilometer_amqp_relation(self):
- """Verify the ceilometer to rabbitmq-server amqp relation data"""
- u.log.debug('Checking ceilometer:rabbitmq amqp relation data...')
- unit = self.ceil_sentry
- relation = ['amqp', 'rabbitmq-server:amqp']
- expected = {
- 'username': 'ceilometer',
- 'private-address': u.valid_ip,
- 'vhost': 'openstack'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_204_amqp_ceilometer_relation(self):
- """Verify the rabbitmq-server to ceilometer amqp relation data"""
- u.log.debug('Checking rabbitmq:ceilometer amqp relation data...')
- unit = self.rabbitmq_sentry
- relation = ['amqp', 'ceilometer:amqp']
- expected = {
- 'hostname': u.valid_ip,
- 'private-address': u.valid_ip,
- 'password': u.not_null,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('rabbitmq amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_205_ceilometer_to_mongodb_relation(self):
- """Verify the ceilometer to mongodb relation data"""
- u.log.debug('Checking ceilometer:mongodb relation data...')
- unit = self.ceil_sentry
- relation = ['shared-db', 'mongodb:database']
- expected = {
- 'ceilometer_database': 'ceilometer',
- 'private-address': u.valid_ip,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer shared-db', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_206_mongodb_to_ceilometer_relation(self):
- """Verify the mongodb to ceilometer relation data"""
- u.log.debug('Checking mongodb:ceilometer relation data...')
- unit = self.mongodb_sentry
- relation = ['database', 'ceilometer:shared-db']
- expected = {
- 'hostname': u.valid_ip,
- 'port': '27017',
- 'private-address': u.valid_ip,
- 'type': 'database',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('mongodb database', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_207_ceilometer_ceilometer_agent_relation(self):
- """Verify the ceilometer to ceilometer-agent relation data"""
- u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
- unit = self.ceil_sentry
- relation = ['ceilometer-service',
- 'ceilometer-agent:ceilometer-service']
- expected = {
- 'rabbitmq_user': 'ceilometer',
- 'verbose': 'False',
- 'rabbitmq_host': u.valid_ip,
- 'service_ports': "{'ceilometer_api': [8777, 8767]}",
- 'use_syslog': 'False',
- 'metering_secret': u.not_null,
- 'rabbitmq_virtual_host': 'openstack',
- 'db_port': '27017',
- 'private-address': u.valid_ip,
- 'db_name': 'ceilometer',
- 'db_host': u.valid_ip,
- 'debug': 'False',
- 'rabbitmq_password': u.not_null,
- 'port': '8767'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_208_ceilometer_agent_ceilometer_relation(self):
- """Verify the ceilometer-agent to ceilometer relation data"""
- u.log.debug('Checking ceilometer-agent:ceilometer relation data...')
- unit = self.ceil_agent_sentry
- relation = ['ceilometer-service', 'ceilometer:ceilometer-service']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_209_nova_compute_ceilometer_agent_relation(self):
- """Verify the nova-compute to ceilometer relation data"""
- u.log.debug('Checking nova-compute:ceilometer relation data...')
- unit = self.nova_sentry
- relation = ['nova-ceilometer', 'ceilometer-agent:nova-ceilometer']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_210_ceilometer_agent_nova_compute_relation(self):
- """Verify the ceilometer to nova-compute relation data"""
- u.log.debug('Checking ceilometer:nova-compute relation data...')
- unit = self.ceil_agent_sentry
- relation = ['nova-ceilometer', 'nova-compute:nova-ceilometer']
- sub = ('{"nova": {"/etc/nova/nova.conf": {"sections": {"DEFAULT": '
- '[["instance_usage_audit", "True"], '
- '["instance_usage_audit_period", "hour"], '
- '["notify_on_state_change", "vm_and_task_state"], '
- '["notification_driver", "ceilometer.compute.nova_notifier"], '
- '["notification_driver", '
- '"nova.openstack.common.notifier.rpc_notifier"]]}}}}')
- expected = {
- 'subordinate_configuration': sub,
- 'private-address': u.valid_ip
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_300_ceilometer_config(self):
- """Verify the data in the ceilometer config file."""
- u.log.debug('Checking ceilometer config file data...')
- unit = self.ceil_sentry
- ks_rel = self.keystone_sentry.relation('identity-service',
- 'ceilometer:identity-service')
- auth_uri = '%s://%s:%s/' % (ks_rel['service_protocol'],
- ks_rel['service_host'],
- ks_rel['service_port'])
- db_relation = self.mongodb_sentry.relation('database',
- 'ceilometer:shared-db')
- db_conn = 'mongodb://%s:%s/ceilometer' % (db_relation['hostname'],
- db_relation['port'])
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- },
- 'api': {
- 'port': '8767',
- },
- 'service_credentials': {
- 'os_auth_url': auth_uri + 'v2.0',
- 'os_tenant_name': 'services',
- 'os_username': 'ceilometer',
- 'os_password': ks_rel['service_password'],
- },
- 'database': {
- 'connection': db_conn,
- },
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_301_nova_config(self):
- """Verify data in the nova compute nova config file"""
- u.log.debug('Checking nova compute config file...')
- unit = self.nova_sentry
- conf = '/etc/nova/nova.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- 'my_ip': u.valid_ip,
- }
- }
-
- # NOTE(beisner): notification_driver is not checked like the
- # others, as configparser does not support duplicate config
- # options, and dicts cant have duplicate keys.
- # Ex. from conf file:
- # notification_driver = ceilometer.compute.nova_notifier
- # notification_driver = nova.openstack.common.notifier.rpc_notifier
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- # Check notification_driver existence via simple grep cmd
- lines = [('notification_driver = '
- 'ceilometer.compute.nova_notifier'),
- ('notification_driver = '
- 'nova.openstack.common.notifier.rpc_notifier')]
-
- sentry_units = [unit]
- cmds = []
- for line in lines:
- cmds.append('grep "{}" {}'.format(line, conf))
-
- ret = u.check_commands_on_units(cmds, sentry_units)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_302_nova_ceilometer_config(self):
- """Verify data in the ceilometer config file on the
- nova-compute (ceilometer-agent) unit."""
- u.log.debug('Checking nova ceilometer config file...')
- unit = self.nova_sentry
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'logdir': '/var/log/ceilometer'
- },
- 'database': {
- 'backend': 'sqlalchemy',
- 'connection': 'sqlite:////var/lib/ceilometer/$sqlite_db'
- }
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_400_api_connection(self):
- """Simple api calls to check service is up and responding"""
- u.log.debug('Checking api functionality...')
- assert(self.ceil.samples.list() == [])
- assert(self.ceil.meters.list() == [])
- u.log.debug('OK')
-
- # NOTE(beisner): need to add more functional tests
-
- def test_900_restart_on_config_change(self):
- """Verify that the specified services are restarted when the config
- is changed.
- """
- sentry = self.ceil_sentry
- juju_service = 'ceilometer'
-
- # Expected default and alternate values
- set_default = {'debug': 'False'}
- set_alternate = {'debug': 'True'}
-
- # Services which are expected to restart upon config change,
- # and corresponding config files affected by the change
- conf_file = '/etc/ceilometer/ceilometer.conf'
- services = {
- 'ceilometer-collector': conf_file,
- 'ceilometer-api': conf_file,
- 'ceilometer-agent-notification': conf_file,
- }
-
- if self._get_openstack_release() < self.trusty_mitaka:
- services['ceilometer-alarm-notifier'] = conf_file
- services['ceilometer-alarm-evaluator'] = conf_file
-
- if self._get_openstack_release() == self.trusty_liberty or \
- self._get_openstack_release() >= self.wily_liberty:
- # Liberty and later
- services['ceilometer-polling'] = conf_file
- else:
- # Juno and earlier
- services['ceilometer-agent-central'] = conf_file
-
- # Make config change, check for service restarts
- u.log.debug('Making config change on {}...'.format(juju_service))
- mtime = u.get_sentry_time(sentry)
- self.d.configure(juju_service, set_alternate)
-
- sleep_time = 40
- for s, conf_file in services.iteritems():
- u.log.debug("Checking that service restarted: {}".format(s))
- if not u.validate_service_config_changed(sentry, mtime, s,
- conf_file,
- retry_count=4,
- retry_sleep_time=20,
- sleep_time=sleep_time):
- self.d.configure(juju_service, set_default)
- msg = "service {} didn't restart after config change".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
- sleep_time = 0
-
- self.d.configure(juju_service, set_default)
- u.log.debug('OK')
-
- def test_910_pause_and_resume(self):
- """The services can be paused and resumed. """
- u.log.debug('Checking pause and resume actions...')
- unit_name = "ceilometer/0"
- unit = self.d.sentry.unit[unit_name]
- juju_service = 'ceilometer'
-
- assert u.status_get(unit)[0] == "active"
-
- action_id = self._run_action(unit_name, "pause")
- assert self._wait_on_action(action_id), "Pause action failed."
- assert u.status_get(unit)[0] == "maintenance"
-
- # trigger config-changed to ensure that services are still stopped
- u.log.debug("Making config change on ceilometer ...")
- self.d.configure(juju_service, {'debug': 'True'})
- assert u.status_get(unit)[0] == "maintenance"
- self.d.configure(juju_service, {'debug': 'False'})
- assert u.status_get(unit)[0] == "maintenance"
-
- action_id = self._run_action(unit_name, "resume")
- assert self._wait_on_action(action_id), "Resume action failed."
- assert u.status_get(unit)[0] == "active"
- u.log.debug('OK')
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
deleted file mode 100644
index d451698..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import os
-import six
-
-
-class AmuletDeployment(object):
- """Amulet deployment.
-
- This class provides generic Amulet deployment and test runner
- methods.
- """
-
- def __init__(self, series=None):
- """Initialize the deployment environment."""
- self.series = None
-
- if series:
- self.series = series
- self.d = amulet.Deployment(series=self.series)
- else:
- self.d = amulet.Deployment()
-
- def _add_services(self, this_service, other_services):
- """Add services.
-
- Add services to the deployment where this_service is the local charm
- that we're testing and other_services are the other services that
- are being used in the local amulet tests.
- """
- if this_service['name'] != os.path.basename(os.getcwd()):
- s = this_service['name']
- msg = "The charm's root directory name needs to be {}".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if 'units' not in this_service:
- this_service['units'] = 1
-
- self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'))
-
- for svc in other_services:
- if 'location' in svc:
- branch_location = svc['location']
- elif self.series:
- branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
- else:
- branch_location = None
-
- if 'units' not in svc:
- svc['units'] = 1
-
- self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'))
-
- def _add_relations(self, relations):
- """Add all of the relations for the services."""
- for k, v in six.iteritems(relations):
- self.d.relate(k, v)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _deploy(self):
- """Deploy environment and wait for all hooks to finish executing."""
- try:
- self.d.setup(timeout=900)
- self.d.sentry.wait(timeout=900)
- except amulet.helpers.TimeoutError:
- amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
- except Exception:
- raise
-
- def run_tests(self):
- """Run all of the methods that are prefixed with 'test_'."""
- for test in dir(self):
- if test.startswith('test_'):
- getattr(self, test)()
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
deleted file mode 100644
index 7e5c25a..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
+++ /dev/null
@@ -1,829 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import json
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import time
-import uuid
-
-import amulet
-import distro_info
-import six
-from six.moves import configparser
-if six.PY3:
- from urllib import parse as urlparse
-else:
- import urlparse
-
-
-class AmuletUtils(object):
- """Amulet utilities.
-
- This class provides common utility functions that are used by Amulet
- tests.
- """
-
- def __init__(self, log_level=logging.ERROR):
- self.log = self.get_logger(level=log_level)
- self.ubuntu_releases = self.get_ubuntu_releases()
-
- def get_logger(self, name="amulet-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def valid_ip(self, ip):
- if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
- return True
- else:
- return False
-
- def valid_url(self, url):
- p = re.compile(
- r'^(?:http|ftp)s?://'
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
- r'localhost|'
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
- r'(?::\d+)?'
- r'(?:/?|[/?]\S+)$',
- re.IGNORECASE)
- if p.match(url):
- return True
- else:
- return False
-
- def get_ubuntu_release_from_sentry(self, sentry_unit):
- """Get Ubuntu release codename from sentry unit.
-
- :param sentry_unit: amulet sentry/service unit pointer
- :returns: list of strings - release codename, failure message
- """
- msg = None
- cmd = 'lsb_release -cs'
- release, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} lsb_release: {}'.format(
- sentry_unit.info['unit_name'], release))
- else:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, release, code))
- if release not in self.ubuntu_releases:
- msg = ("Release ({}) not found in Ubuntu releases "
- "({})".format(release, self.ubuntu_releases))
- return release, msg
-
- def validate_services(self, commands):
- """Validate that lists of commands succeed on service units. Can be
- used to verify system services are running on the corresponding
- service units.
-
- :param commands: dict with sentry keys and arbitrary command list vals
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # /!\ DEPRECATION WARNING (beisner):
- # New and existing tests should be rewritten to use
- # validate_services_by_name() as it is aware of init systems.
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_services_by_name instead of validate_services '
- 'due to init system differences.')
-
- for k, v in six.iteritems(commands):
- for cmd in v:
- output, code = k.run(cmd)
- self.log.debug('{} `{}` returned '
- '{}'.format(k.info['unit_name'],
- cmd, code))
- if code != 0:
- return "command `{}` returned {}".format(cmd, str(code))
- return None
-
- def validate_services_by_name(self, sentry_services):
- """Validate system service status by service name, automatically
- detecting init system based on Ubuntu release codename.
-
- :param sentry_services: dict with sentry keys and svc list values
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # Point at which systemd became a thing
- systemd_switch = self.ubuntu_releases.index('vivid')
-
- for sentry_unit, services_list in six.iteritems(sentry_services):
- # Get lsb_release codename from unit
- release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
- if ret:
- return ret
-
- for service_name in services_list:
- if (self.ubuntu_releases.index(release) >= systemd_switch or
- service_name in ['rabbitmq-server', 'apache2']):
- # init is systemd (or regular sysv)
- cmd = 'sudo service {} status'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0
- elif self.ubuntu_releases.index(release) < systemd_switch:
- # init is upstart
- cmd = 'sudo status {}'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0 and "start/running" in output
-
- self.log.debug('{} `{}` returned '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code))
- if not service_running:
- return u"command `{}` returned {} {}".format(
- cmd, output, str(code))
- return None
-
- def _get_config(self, unit, filename):
- """Get a ConfigParser object for parsing a unit's config file."""
- file_contents = unit.file_contents(filename)
-
- # NOTE(beisner): by default, ConfigParser does not handle options
- # with no value, such as the flags used in the mysql my.cnf file.
- # https://bugs.python.org/issue7005
- config = configparser.ConfigParser(allow_no_value=True)
- config.readfp(io.StringIO(file_contents))
- return config
-
- def validate_config_data(self, sentry_unit, config_file, section,
- expected):
- """Validate config file data.
-
- Verify that the specified section of the config file contains
- the expected option key:value pairs.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('Validating config file data ({} in {} on {})'
- '...'.format(section, config_file,
- sentry_unit.info['unit_name']))
- config = self._get_config(sentry_unit, config_file)
-
- if section != 'DEFAULT' and not config.has_section(section):
- return "section [{}] does not exist".format(section)
-
- for k in expected.keys():
- if not config.has_option(section, k):
- return "section [{}] is missing option {}".format(section, k)
-
- actual = config.get(section, k)
- v = expected[k]
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if actual != v:
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual):
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- return None
-
- def _validate_dict_data(self, expected, actual):
- """Validate dictionary data.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('actual: {}'.format(repr(actual)))
- self.log.debug('expected: {}'.format(repr(expected)))
-
- for k, v in six.iteritems(expected):
- if k in actual:
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if v != actual[k]:
- return "{}:{}".format(k, actual[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual[k]):
- return "{}:{}".format(k, actual[k])
- else:
- return "key '{}' does not exist".format(k)
- return None
-
- def validate_relation_data(self, sentry_unit, relation, expected):
- """Validate actual relation data based on expected relation data."""
- actual = sentry_unit.relation(relation[0], relation[1])
- return self._validate_dict_data(expected, actual)
-
- def _validate_list_data(self, expected, actual):
- """Compare expected list vs actual list data."""
- for e in expected:
- if e not in actual:
- return "expected item {} not found in actual list".format(e)
- return None
-
- def not_null(self, string):
- if string is not None:
- return True
- else:
- return False
-
- def _get_file_mtime(self, sentry_unit, filename):
- """Get last modification time of file."""
- return sentry_unit.file_stat(filename)['mtime']
-
- def _get_dir_mtime(self, sentry_unit, directory):
- """Get last modification time of directory."""
- return sentry_unit.directory_stat(directory)['mtime']
-
- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
- """Get start time of a process based on the last modification time
- of the /proc/pid directory.
-
- :sentry_unit: The sentry unit to check for the service on
- :service: service name to look for in process table
- :pgrep_full: [Deprecated] Use full command line search mode with pgrep
- :returns: epoch time of service process start
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- if pgrep_full is not None:
- # /!\ DEPRECATION WARNING (beisner):
- # No longer implemented, as pidof is now used instead of pgrep.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
- 'longer implemented re: lp 1474030.')
-
- pid_list = self.get_process_id_list(sentry_unit, service)
- pid = pid_list[0]
- proc_dir = '/proc/{}'.format(pid)
- self.log.debug('Pid for {} on {}: {}'.format(
- service, sentry_unit.info['unit_name'], pid))
-
- return self._get_dir_mtime(sentry_unit, proc_dir)
-
- def service_restarted(self, sentry_unit, service, filename,
- pgrep_full=None, sleep_time=20):
- """Check if service was restarted.
-
- Compare a service's start time vs a file's last modification time
- (such as a config file for that service) to determine if the service
- has been restarted.
- """
- # /!\ DEPRECATION WARNING (beisner):
- # This method is prone to races in that no before-time is known.
- # Use validate_service_config_changed instead.
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_service_config_changed instead of '
- 'service_restarted due to known races.')
-
- time.sleep(sleep_time)
- if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
- self._get_file_mtime(sentry_unit, filename)):
- return True
- else:
- return False
-
- def service_restarted_since(self, sentry_unit, mtime, service,
- pgrep_full=None, sleep_time=20,
- retry_count=30, retry_sleep_time=10):
- """Check if service was been started after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if service found and its start time it newer than mtime,
- False if service is older than mtime or if service was
- not found.
- """
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s service restarted since %s on '
- '%s' % (service, mtime, unit_name))
- time.sleep(sleep_time)
- proc_start_time = None
- tries = 0
- while tries <= retry_count and not proc_start_time:
- try:
- proc_start_time = self._get_proc_start_time(sentry_unit,
- service,
- pgrep_full)
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'OK'.format(tries, service, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, proc may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'failed\n{}'.format(tries, service,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not proc_start_time:
- self.log.warn('No proc start time found, assuming service did '
- 'not start')
- return False
- if proc_start_time >= mtime:
- self.log.debug('Proc start time is newer than provided mtime'
- '(%s >= %s) on %s (OK)' % (proc_start_time,
- mtime, unit_name))
- return True
- else:
- self.log.warn('Proc start time (%s) is older than provided mtime '
- '(%s) on %s, service did not '
- 'restart' % (proc_start_time, mtime, unit_name))
- return False
-
- def config_updated_since(self, sentry_unit, filename, mtime,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check if file was modified after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check the file mtime on
- filename (string): The file to check mtime of
- mtime (float): The epoch time to check against
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if file was modified more recently than mtime, False if
- file was modified before mtime, or if file not found.
- """
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s updated since %s on '
- '%s' % (filename, mtime, unit_name))
- time.sleep(sleep_time)
- file_mtime = None
- tries = 0
- while tries <= retry_count and not file_mtime:
- try:
- file_mtime = self._get_file_mtime(sentry_unit, filename)
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'OK'.format(tries, filename, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, file may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'failed\n{}'.format(tries, filename,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not file_mtime:
- self.log.warn('Could not determine file mtime, assuming '
- 'file does not exist')
- return False
-
- if file_mtime >= mtime:
- self.log.debug('File mtime is newer than provided mtime '
- '(%s >= %s) on %s (OK)' % (file_mtime,
- mtime, unit_name))
- return True
- else:
- self.log.warn('File mtime is older than provided mtime'
- '(%s < on %s) on %s' % (file_mtime,
- mtime, unit_name))
- return False
-
- def validate_service_config_changed(self, sentry_unit, mtime, service,
- filename, pgrep_full=None,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check service and file were updated after mtime
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- filename (string): The file to check mtime of
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep in seconds to pass to test helpers
- retry_count (int): If service is not found, how many times to retry
- retry_sleep_time (int): Time in seconds to wait between retries
-
- Typical Usage:
- u = OpenStackAmuletUtils(ERROR)
- ...
- mtime = u.get_sentry_time(self.cinder_sentry)
- self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
- if not u.validate_service_config_changed(self.cinder_sentry,
- mtime,
- 'cinder-api',
- '/etc/cinder/cinder.conf')
- amulet.raise_status(amulet.FAIL, msg='update failed')
- Returns:
- bool: True if both service and file where updated/restarted after
- mtime, False if service is older than mtime or if service was
- not found or if filename was modified before mtime.
- """
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- service_restart = self.service_restarted_since(
- sentry_unit, mtime,
- service,
- pgrep_full=pgrep_full,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- config_update = self.config_updated_since(
- sentry_unit,
- filename,
- mtime,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- return service_restart and config_update
-
- def get_sentry_time(self, sentry_unit):
- """Return current epoch time on a sentry"""
- cmd = "date +'%s'"
- return float(sentry_unit.run(cmd)[0])
-
- def relation_error(self, name, data):
- return 'unexpected relation data in {} - {}'.format(name, data)
-
- def endpoint_error(self, name, data):
- return 'unexpected endpoint data in {} - {}'.format(name, data)
-
- def get_ubuntu_releases(self):
- """Return a list of all Ubuntu releases in order of release."""
- _d = distro_info.UbuntuDistroInfo()
- _release_list = _d.all
- return _release_list
-
- def file_to_url(self, file_rel_path):
- """Convert a relative file path to a file URL."""
- _abs_path = os.path.abspath(file_rel_path)
- return urlparse.urlparse(_abs_path, scheme='file').geturl()
-
- def check_commands_on_units(self, commands, sentry_units):
- """Check that all commands in a list exit zero on all
- sentry units in a list.
-
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- self.log.debug('Checking exit codes for {} commands on {} '
- 'sentry units...'.format(len(commands),
- len(sentry_units)))
- for sentry_unit in sentry_units:
- for cmd in commands:
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- return ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- return None
-
- def get_process_id_list(self, sentry_unit, process_name,
- expect_success=True):
- """Get a list of process ID(s) from a single sentry juju unit
- for a single process name.
-
- :param sentry_unit: Amulet sentry instance (juju unit)
- :param process_name: Process name
- :param expect_success: If False, expect the PID to be missing,
- raise if it is present.
- :returns: List of process IDs
- """
- cmd = 'pidof -x {}'.format(process_name)
- if not expect_success:
- cmd += " || exit 0 && exit 1"
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output).split()
-
- def get_unit_process_ids(self, unit_processes, expect_success=True):
- """Construct a dict containing unit sentries, process names, and
- process IDs.
-
- :param unit_processes: A dictionary of Amulet sentry instance
- to list of process names.
- :param expect_success: if False expect the processes to not be
- running, raise if they are.
- :returns: Dictionary of Amulet sentry instance to dictionary
- of process names to PIDs.
- """
- pid_dict = {}
- for sentry_unit, process_list in six.iteritems(unit_processes):
- pid_dict[sentry_unit] = {}
- for process in process_list:
- pids = self.get_process_id_list(
- sentry_unit, process, expect_success=expect_success)
- pid_dict[sentry_unit].update({process: pids})
- return pid_dict
-
- def validate_unit_process_ids(self, expected, actual):
- """Validate process id quantities for services on units."""
- self.log.debug('Checking units for running processes...')
- self.log.debug('Expected PIDs: {}'.format(expected))
- self.log.debug('Actual PIDs: {}'.format(actual))
-
- if len(actual) != len(expected):
- return ('Unit count mismatch. expected, actual: {}, '
- '{} '.format(len(expected), len(actual)))
-
- for (e_sentry, e_proc_names) in six.iteritems(expected):
- e_sentry_name = e_sentry.info['unit_name']
- if e_sentry in actual.keys():
- a_proc_names = actual[e_sentry]
- else:
- return ('Expected sentry ({}) not found in actual dict data.'
- '{}'.format(e_sentry_name, e_sentry))
-
- if len(e_proc_names.keys()) != len(a_proc_names.keys()):
- return ('Process name count mismatch. expected, actual: {}, '
- '{}'.format(len(expected), len(actual)))
-
- for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
- zip(e_proc_names.items(), a_proc_names.items()):
- if e_proc_name != a_proc_name:
- return ('Process name mismatch. expected, actual: {}, '
- '{}'.format(e_proc_name, a_proc_name))
-
- a_pids_length = len(a_pids)
- fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
- '{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids_length,
- a_pids))
-
- # If expected is a list, ensure at least one PID quantity match
- if isinstance(e_pids, list) and \
- a_pids_length not in e_pids:
- return fail_msg
- # If expected is not bool and not list,
- # ensure PID quantities match
- elif not isinstance(e_pids, bool) and \
- not isinstance(e_pids, list) and \
- a_pids_length != e_pids:
- return fail_msg
- # If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is True and a_pids_length < 1:
- return fail_msg
- # If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is False and a_pids_length != 0:
- return fail_msg
- else:
- self.log.debug('PID check OK: {} {} {}: '
- '{}'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids))
- return None
-
- def validate_list_of_identical_dicts(self, list_of_dicts):
- """Check that all dicts within a list are identical."""
- hashes = []
- for _dict in list_of_dicts:
- hashes.append(hash(frozenset(_dict.items())))
-
- self.log.debug('Hashes: {}'.format(hashes))
- if len(set(hashes)) == 1:
- self.log.debug('Dicts within list are identical')
- else:
- return 'Dicts within list are not identical'
-
- return None
-
- def validate_sectionless_conf(self, file_contents, expected):
- """A crude conf parser. Useful to inspect configuration files which
- do not have section headers (as would be necessary in order to use
- the configparser). Such as openstack-dashboard or rabbitmq confs."""
- for line in file_contents.split('\n'):
- if '=' in line:
- args = line.split('=')
- if len(args) <= 1:
- continue
- key = args[0].strip()
- value = args[1].strip()
- if key in expected.keys():
- if expected[key] != value:
- msg = ('Config mismatch. Expected, actual: {}, '
- '{}'.format(expected[key], value))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def get_unit_hostnames(self, units):
- """Return a dict of juju unit names to hostnames."""
- host_names = {}
- for unit in units:
- host_names[unit.info['unit_name']] = \
- str(unit.file_contents('/etc/hostname').strip())
- self.log.debug('Unit host names: {}'.format(host_names))
- return host_names
-
- def run_cmd_unit(self, sentry_unit, cmd):
- """Run a command on a unit, return the output and exit code."""
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` command returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- msg = ('{} `{}` command returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output), code
-
- def file_exists_on_unit(self, sentry_unit, file_name):
- """Check if a file exists on a unit."""
- try:
- sentry_unit.file_stat(file_name)
- return True
- except IOError:
- return False
- except Exception as e:
- msg = 'Error checking file {}: {}'.format(file_name, e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def file_contents_safe(self, sentry_unit, file_name,
- max_wait=60, fatal=False):
- """Get file contents from a sentry unit. Wrap amulet file_contents
- with retry logic to address races where a file checks as existing,
- but no longer exists by the time file_contents is called.
- Return None if file not found. Optionally raise if fatal is True."""
- unit_name = sentry_unit.info['unit_name']
- file_contents = False
- tries = 0
- while not file_contents and tries < (max_wait / 4):
- try:
- file_contents = sentry_unit.file_contents(file_name)
- except IOError:
- self.log.debug('Attempt {} to open file {} from {} '
- 'failed'.format(tries, file_name,
- unit_name))
- time.sleep(4)
- tries += 1
-
- if file_contents:
- return file_contents
- elif not fatal:
- return None
- elif fatal:
- msg = 'Failed to get file contents from unit.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def port_knock_tcp(self, host="localhost", port=22, timeout=15):
- """Open a TCP socket to check for a listening sevice on a host.
-
- :param host: host name or IP address, default to localhost
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :returns: True if successful, False if connect failed
- """
-
- # Resolve host name if possible
- try:
- connect_host = socket.gethostbyname(host)
- host_human = "{} ({})".format(connect_host, host)
- except socket.error as e:
- self.log.warn('Unable to resolve address: '
- '{} ({}) Trying anyway!'.format(host, e))
- connect_host = host
- host_human = connect_host
-
- # Attempt socket connection
- try:
- knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- knock.settimeout(timeout)
- knock.connect((connect_host, port))
- knock.close()
- self.log.debug('Socket connect OK for host '
- '{} on port {}.'.format(host_human, port))
- return True
- except socket.error as e:
- self.log.debug('Socket connect FAIL for'
- ' {} port {} ({})'.format(host_human, port, e))
- return False
-
- def port_knock_units(self, sentry_units, port=22,
- timeout=15, expect_success=True):
- """Open a TCP socket to check for a listening sevice on each
- listed juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :expect_success: True by default, set False to invert logic
- :returns: None if successful, Failure message otherwise
- """
- for unit in sentry_units:
- host = unit.info['public-address']
- connected = self.port_knock_tcp(host, port, timeout)
- if not connected and expect_success:
- return 'Socket connect failed.'
- elif connected and not expect_success:
- return 'Socket connected unexpectedly.'
-
- def get_uuid_epoch_stamp(self):
- """Returns a stamp string based on uuid4 and epoch time. Useful in
- generating test messages which need to be unique-ish."""
- return '[{}-{}]'.format(uuid.uuid4(), time.time())
-
-# amulet juju action helpers:
- def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output,
- params=None):
- """Run the named action on a given unit sentry.
-
- params a dict of parameters to use
- _check_output parameter is used for dependency injection.
-
- @return action_id.
- """
- unit_id = unit_sentry.info["unit_name"]
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- if params is not None:
- for key, value in params.iteritems():
- command.append("{}={}".format(key, value))
- self.log.info("Running command: %s\n" % " ".join(command))
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- action_id = data[u'Action queued with id']
- return action_id
-
- def wait_on_action(self, action_id, _check_output=subprocess.check_output):
- """Wait for a given action, returning if it completed or not.
-
- _check_output parameter is used for dependency injection.
- """
- command = ["juju", "action", "fetch", "--format=json", "--wait=0",
- action_id]
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- return data.get(u"status") == "completed"
-
- def status_get(self, unit):
- """Return the current service status of this unit."""
- raw_status, return_code = unit.run(
- "status-get --format=json --include-data")
- if return_code != 0:
- return ("unknown", "")
- status = json.loads(raw_status)
- return (status["status"], status["message"])
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index d21c9c7..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef3bdcc..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/tests/setup/00-setup b/charms/trusty/ceilometer/tests/setup/00-setup
deleted file mode 100755
index 658eb60..0000000
--- a/charms/trusty/ceilometer/tests/setup/00-setup
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo add-apt-repository --yes ppa:juju/stable
-sudo apt-get update --yes
-sudo apt-get install --yes amulet \
- distro-info-data \
- python-ceilometerclient \
- python-cinderclient \
- python-distro-info \
- python-glanceclient \
- python-heatclient \
- python-keystoneclient \
- python-neutronclient \
- python-novaclient \
- python-pika \
- python-swiftclient
diff --git a/charms/trusty/ceilometer/tests/tests.yaml b/charms/trusty/ceilometer/tests/tests.yaml
deleted file mode 100644
index 4d17631..0000000
--- a/charms/trusty/ceilometer/tests/tests.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-bootstrap: true
-reset: false
-virtualenv: true
-makefile:
- - lint
- - test
-sources:
- - ppa:juju/stable
-packages:
- - amulet
- - distro-info-data
- - python-ceilometerclient
- - python-cinderclient
- - python-distro-info
- - python-glanceclient
- - python-heatclient
- - python-keystoneclient
- - python-neutronclient
- - python-novaclient
- - python-pika
- - python-swiftclient
diff --git a/charms/trusty/ceilometer/tox.ini b/charms/trusty/ceilometer/tox.ini
deleted file mode 100644
index c051dba..0000000
--- a/charms/trusty/ceilometer/tox.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[tox]
-envlist = pep8,py27
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
-install_command =
- pip install --allow-unverified python-apt {opts} {packages}
-commands = ostestr {posargs}
-
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:pep8]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions
- charm-proof
-
-[testenv:venv]
-commands = {posargs}
-
-[flake8]
-ignore = E402,E226
-exclude = hooks/charmhelpers
diff --git a/charms/trusty/ceilometer/unit_tests/__init__.py b/charms/trusty/ceilometer/unit_tests/__init__.py
deleted file mode 100644
index 53a4570..0000000
--- a/charms/trusty/ceilometer/unit_tests/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import sys
-sys.path.append('actions')
-sys.path.append('hooks')
diff --git a/charms/trusty/ceilometer/unit_tests/test_actions.py b/charms/trusty/ceilometer/unit_tests/test_actions.py
deleted file mode 100644
index 67643c8..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_actions.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import mock
-from mock import patch
-
-from test_utils import CharmTestCase
-
-with patch('ceilometer_utils.register_configs') as configs:
- configs.return_value = 'test-config'
- import actions
-
-
-class PauseTestCase(CharmTestCase):
-
- def setUp(self):
- super(PauseTestCase, self).setUp(
- actions, ["pause_unit_helper"])
-
- def test_pauses_services(self):
- actions.pause([])
- self.pause_unit_helper.assert_called_once_with('test-config')
-
-
-class ResumeTestCase(CharmTestCase):
-
- def setUp(self):
- super(ResumeTestCase, self).setUp(
- actions, ["resume_unit_helper"])
-
- def test_pauses_services(self):
- actions.resume([])
- self.resume_unit_helper.assert_called_once_with('test-config')
-
-
-class MainTestCase(CharmTestCase):
-
- def setUp(self):
- super(MainTestCase, self).setUp(actions, ["action_fail"])
-
- def test_invokes_action(self):
- dummy_calls = []
-
- def dummy_action(args):
- dummy_calls.append(True)
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, [True])
-
- def test_unknown_action(self):
- """Unknown actions aren't a traceback."""
- exit_string = actions.main(["foo"])
- self.assertEqual("Action foo undefined", exit_string)
-
- def test_failing_action(self):
- """Actions which traceback trigger action_fail() calls."""
- dummy_calls = []
-
- self.action_fail.side_effect = dummy_calls.append
-
- def dummy_action(args):
- raise ValueError("uh oh")
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, ["uh oh"])
diff --git a/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py b/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py
deleted file mode 100644
index 3babe21..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import sys
-
-from mock import patch, MagicMock
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock()
-sys.modules['apt'] = mock_apt
-mock_apt.apt_pkg = MagicMock()
-
-os.environ['JUJU_UNIT_NAME'] = 'ceilometer'
-
-with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
- mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
- lambda *args, **kwargs: f(*args, **kwargs))
- with patch('ceilometer_utils.register_configs') as register_configs:
- with patch('ceilometer_utils.ceilometer_release_services'):
- import openstack_upgrade
-
-from test_utils import (
- CharmTestCase
-)
-
-TO_PATCH = [
- 'config_changed',
- 'do_openstack_upgrade',
-]
-
-
-class TestCeilometerUpgradeActions(CharmTestCase):
-
- def setUp(self):
- super(TestCeilometerUpgradeActions, self).setUp(openstack_upgrade,
- TO_PATCH)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = True
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.config_changed.called)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = False
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertFalse(self.config_changed.called)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py
deleted file mode 100644
index 038dfd0..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py
+++ /dev/null
@@ -1,164 +0,0 @@
-from mock import patch
-
-import ceilometer_contexts as contexts
-import ceilometer_utils as utils
-
-from test_utils import CharmTestCase, mock_open
-
-TO_PATCH = [
- 'config',
- 'relation_get',
- 'relation_ids',
- 'related_units',
- 'os_release',
-]
-
-
-class CeilometerContextsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerContextsTest, self).setUp(contexts, TO_PATCH)
- self.config.side_effect = self.test_config.get
- self.relation_get.side_effect = self.test_relation.get
-
- def tearDown(self):
- super(CeilometerContextsTest, self).tearDown()
-
- def test_logging_context(self):
- self.test_config.set('debug', False)
- self.test_config.set('verbose', False)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': False, 'verbose': False})
- self.test_config.set('debug', True)
- self.test_config.set('verbose', False)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': True, 'verbose': False})
- self.test_config.set('debug', True)
- self.test_config.set('verbose', True)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': True, 'verbose': True})
-
- def test_mongodb_context_not_related(self):
- self.relation_ids.return_value = []
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(), {})
-
- def test_mongodb_context_related(self):
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': 'mongodb',
- 'port': 8090
- }
- self.test_relation.set(data)
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_host': 'mongodb', 'db_port': 8090,
- 'db_name': 'ceilometer'})
-
- def test_mongodb_context_related_replset_single_mongo(self):
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': 'mongodb-0',
- 'port': 8090,
- 'replset': 'replset-1'
- }
- self.test_relation.set(data)
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_host': 'mongodb-0', 'db_port': 8090,
- 'db_name': 'ceilometer'})
-
- @patch.object(contexts, 'context_complete')
- def test_mongodb_context_related_replset_missing_values(self, mock_ctxcmp):
- mock_ctxcmp.return_value = False
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': None,
- 'port': 8090,
- 'replset': 'replset-1'
- }
- self.test_relation.set(data)
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(), {})
-
- def test_mongodb_context_related_replset_multiple_mongo(self):
- self.relation_ids.return_value = ['shared-db:0']
- related_units = {
- 'mongodb/0': {'hostname': 'mongodb-0',
- 'port': 8090,
- 'replset': 'replset-1'},
- 'mongodb/1': {'hostname': 'mongodb-1',
- 'port': 8090,
- 'replset': 'replset-1'}
- }
- self.related_units.return_value = [k for k in related_units.keys()]
-
- def relation_get(attr, unit, relid):
- values = related_units.get(unit)
- if attr is None:
- return values
- else:
- return values.get(attr, None)
- self.relation_get.side_effect = relation_get
-
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_mongo_servers': 'mongodb-0:8090,mongodb-1:8090',
- 'db_name': 'ceilometer', 'db_replset': 'replset-1'})
-
- @patch.object(utils, 'get_shared_secret')
- def test_ceilometer_context(self, secret):
- secret.return_value = 'mysecret'
- self.assertEquals(contexts.CeilometerContext()(), {
- 'port': 8777,
- 'metering_secret': 'mysecret',
- 'api_workers': 1,
- })
-
- def test_ceilometer_service_context(self):
- self.relation_ids.return_value = ['ceilometer-service:0']
- self.related_units.return_value = ['ceilometer/0']
- data = {
- 'metering_secret': 'mysecret',
- 'keystone_host': 'test'
- }
- self.test_relation.set(data)
- self.assertEquals(contexts.CeilometerServiceContext()(), data)
-
- def test_ceilometer_service_context_not_related(self):
- self.relation_ids.return_value = []
- self.assertEquals(contexts.CeilometerServiceContext()(), {})
-
- @patch('os.path.exists')
- def test_get_shared_secret_existing(self, exists):
- exists.return_value = True
- with mock_open(utils.SHARED_SECRET, u'mysecret'):
- self.assertEquals(utils.get_shared_secret(),
- 'mysecret')
-
- @patch('uuid.uuid4')
- @patch('os.path.exists')
- def test_get_shared_secret_new(self, exists, uuid4):
- exists.return_value = False
- uuid4.return_value = 'newsecret'
- with patch('__builtin__.open'):
- self.assertEquals(utils.get_shared_secret(),
- 'newsecret')
-
- @patch.object(contexts, 'determine_apache_port')
- @patch.object(contexts, 'determine_api_port')
- def test_ha_proxy_context(self, determine_api_port, determine_apache_port):
- determine_api_port.return_value = contexts.CEILOMETER_PORT - 10
- determine_apache_port.return_value = contexts.CEILOMETER_PORT - 20
-
- haproxy_port = contexts.CEILOMETER_PORT
- api_port = haproxy_port - 10
- apache_port = api_port - 10
-
- expected = {
- 'service_ports': {'ceilometer_api': [haproxy_port, apache_port]},
- 'port': api_port
- }
- self.assertEquals(contexts.HAProxyContext()(), expected)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py
deleted file mode 100644
index 1a0b1b1..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py
+++ /dev/null
@@ -1,370 +0,0 @@
-import os
-import sys
-
-from mock import patch, MagicMock, call
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock()
-sys.modules['apt'] = mock_apt
-mock_apt.apt_pkg = MagicMock()
-
-
-import ceilometer_utils
-# Patch out register_configs for import of hooks
-_register_configs = ceilometer_utils.register_configs
-ceilometer_utils.register_configs = MagicMock()
-
-with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
- mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
- lambda *args, **kwargs: f(*args, **kwargs))
- import ceilometer_hooks as hooks
-
-# Renable old function
-ceilometer_utils.register_configs = _register_configs
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'relation_get',
- 'relation_set',
- 'configure_installation_source',
- 'openstack_upgrade_available',
- 'do_openstack_upgrade',
- 'apt_install',
- 'apt_update',
- 'open_port',
- 'config',
- 'log',
- 'relation_ids',
- 'filter_installed_packages',
- 'CONFIGS',
- 'get_ceilometer_context',
- 'lsb_release',
- 'get_packages',
- 'service_restart',
- 'update_nrpe_config',
- 'peer_retrieve',
- 'peer_store',
- 'configure_https',
- 'status_set',
-]
-
-
-class CeilometerHooksTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)
- self.config.side_effect = self.test_config.get
- self.get_packages.return_value = \
- ceilometer_utils.CEILOMETER_BASE_PACKAGES
- self.filter_installed_packages.return_value = \
- ceilometer_utils.CEILOMETER_BASE_PACKAGES
- self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_configure_source(self, mock_config, mock_execd_dir):
- self.test_config.set('openstack-origin', 'cloud:precise-havana')
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('cloud:precise-havana')
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_install_hook_precise(self, mock_config, mock_execd_dir):
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('cloud:precise-grizzly')
- self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
- self.apt_update.assert_called_with(fatal=True)
- self.apt_install.assert_called_with(
- ceilometer_utils.CEILOMETER_BASE_PACKAGES,
- fatal=True
- )
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_install_hook_distro(self, mock_config, mock_execd_dir):
- self.lsb_release.return_value = {'DISTRIB_CODENAME': 'saucy'}
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('distro')
- self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
- self.apt_update.assert_called_with(fatal=True)
- self.apt_install.assert_called_with(
- ceilometer_utils.CEILOMETER_BASE_PACKAGES,
- fatal=True
- )
-
- @patch('charmhelpers.core.hookenv.config')
- def test_amqp_joined(self, mock_config):
- hooks.hooks.execute(['hooks/amqp-relation-joined'])
- self.relation_set.assert_called_with(
- username=self.test_config.get('rabbit-user'),
- vhost=self.test_config.get('rabbit-vhost'))
-
- @patch('charmhelpers.core.hookenv.config')
- def test_db_joined(self, mock_config):
- hooks.hooks.execute(['hooks/shared-db-relation-joined'])
- self.relation_set.assert_called_with(
- ceilometer_database='ceilometer')
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_any_changed(self, joined, mock_config):
- hooks.hooks.execute(['hooks/shared-db-relation-changed'])
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'install')
- @patch.object(hooks, 'any_changed')
- def test_upgrade_charm(self, changed, install, mock_config):
- hooks.hooks.execute(['hooks/upgrade-charm'])
- self.assertTrue(changed.called)
- self.assertTrue(install.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_config_changed_no_upgrade(self, joined, mock_config):
- self.openstack_upgrade_available.return_value = False
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_config_changed_upgrade(self, joined, mock_config):
- self.openstack_upgrade_available.return_value = True
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- def test_config_changed_with_openstack_upgrade_action(self):
- self.openstack_upgrade_available.return_value = True
- self.test_config.set('action-managed-upgrade', True)
-
- hooks.hooks.execute(['hooks/config-changed'])
-
- self.assertFalse(self.do_openstack_upgrade.called)
-
- @patch.object(hooks, 'canonical_url')
- @patch('charmhelpers.core.hookenv.config')
- def test_keystone_joined(self, mock_config, _canonical_url):
- _canonical_url.return_value = "http://thishost"
- self.test_config.set('region', 'myregion')
- hooks.hooks.execute(['hooks/identity-service-relation-joined'])
- url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
- self.relation_set.assert_called_with(
- service=hooks.CEILOMETER_SERVICE,
- public_url=url, admin_url=url, internal_url=url,
- requested_roles=hooks.CEILOMETER_ROLE,
- region='myregion', relation_id=None)
-
- @patch('charmhelpers.contrib.openstack.ip.service_name',
- lambda *args: 'ceilometer')
- @patch('charmhelpers.contrib.openstack.ip.unit_get')
- @patch('charmhelpers.contrib.openstack.ip.is_clustered')
- @patch('charmhelpers.core.hookenv.config')
- @patch('charmhelpers.contrib.openstack.ip.config')
- def test_keystone_joined_url_override(self, _config, mock_config,
- _is_clustered, _unit_get):
- _unit_get.return_value = "thishost"
- _is_clustered.return_value = False
- _config.side_effect = self.test_config.get
- mock_config.side_effect = self.test_config.get
- self.test_config.set('region', 'myregion')
- self.test_config.set('os-public-hostname', 'ceilometer.example.com')
- hooks.keystone_joined(None)
- url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
- public_url = "http://{}:{}".format('ceilometer.example.com',
- hooks.CEILOMETER_PORT)
- self.relation_set.assert_called_with(
- service=hooks.CEILOMETER_SERVICE,
- public_url=public_url, admin_url=url, internal_url=url,
- requested_roles=hooks.CEILOMETER_ROLE,
- region='myregion', relation_id=None)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_ceilometer_joined(self, mock_config):
- self.relation_ids.return_value = ['ceilometer:0']
- self.get_ceilometer_context.return_value = {'test': 'data'}
- hooks.hooks.execute(['hooks/ceilometer-service-relation-joined'])
- self.relation_set.assert_called_with('ceilometer:0',
- {'test': 'data'})
-
- @patch('charmhelpers.core.hookenv.config')
- def test_identity_notifications_changed(self, mock_config):
- self.relation_ids.return_value = ['keystone-notifications:0']
-
- self.relation_get.return_value = None
- hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
-
- self.relation_get.return_value = {('%s-endpoint-changed' %
- (hooks.CEILOMETER_SERVICE)): 1}
-
- hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
- call1 = call('ceilometer-alarm-evaluator')
- call2 = call('ceilometer-alarm-notifier')
- self.service_restart.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'install_ceilometer_ocf')
- @patch.object(hooks, 'is_elected_leader')
- def test_cluster_joined_not_leader(self, mock_leader, mock_install_ocf,
- mock_config):
- mock_leader.return_value = False
-
- hooks.hooks.execute(['hooks/cluster-relation-joined'])
- self.assertFalse(self.relation_set.called)
- self.assertTrue(self.CONFIGS.write_all.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'install_ceilometer_ocf')
- @patch.object(hooks, 'is_elected_leader')
- def test_cluster_joined_is_leader(self, mock_leader, mock_install_ocf,
- shared_secret, mock_config):
- mock_leader.return_value = True
- shared_secret.return_value = 'secret'
-
- hooks.hooks.execute(['hooks/cluster-relation-joined'])
- self.assertTrue(self.peer_store.called)
- self.peer_store.assert_called_with('shared_secret', 'secret')
- self.assertTrue(self.CONFIGS.write_all.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed(self, shared_secret, mock_config):
- self.peer_retrieve.return_value = None
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- self.assertFalse(shared_secret.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed_new_secret(self, mock_set_secret, mock_get_secret,
- mock_config):
- self.peer_retrieve.return_value = "leader_secret"
- mock_get_secret.return_value = "my_secret"
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- mock_set_secret.assert_called_with("leader_secret")
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed_old_secret(self, mock_set_secret, mock_get_secret,
- mock_config):
- self.peer_retrieve.return_value = "leader_secret"
- mock_get_secret.return_value = "leader_secret"
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- self.assertEquals(mock_set_secret.call_count, 0)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_hacluster_config')
- @patch.object(hooks, 'get_iface_for_address')
- @patch.object(hooks, 'get_netmask_for_address')
- def test_ha_joined(self, mock_netmask, mock_iface, mock_cluster_config,
- mock_config):
- mock_cluster_config.return_value = {'vip': '10.0.5.100',
- 'ha-bindiface': 'bnd0',
- 'ha-mcastport': 5802}
- mock_iface.return_value = 'eth0'
- mock_netmask.return_value = '255.255.255.10'
- hooks.hooks.execute(['hooks/ha-relation-joined'])
- self.assertEquals(self.relation_set.call_count, 2)
-
- exp_resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central'),
- 'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
- }
- exp_resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': 'op monitor interval="30s"',
- 'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
- 'cidr_netmask="255.255.255.10" '
- 'nic="eth0"')
- }
- exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
- call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
- call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
- corosync_bindiface='bnd0',
- corosync_mcastport=5802,
- resources=exp_resources,
- resource_params=exp_resource_params,
- clones=exp_clones)
- self.relation_set.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_netmask_for_address')
- @patch.object(hooks, 'get_hacluster_config')
- @patch.object(hooks, 'get_iface_for_address')
- @patch.object(hooks, 'relation_ids')
- @patch.object(hooks, 'related_units')
- @patch.object(hooks, 'relation_get')
- def test_ha_joined_ssl(self, mock_rel_get, mock_rel_units, mock_rel_ids,
- mock_iface, mock_cluster_config, mock_netmask,
- mock_config):
- mock_rel_ids.return_value = 'amqp:0'
- mock_rel_units.return_value = 'rabbitmq-server/0'
- mock_rel_get.return_value = '5671'
-
- mock_iface.return_value = 'eth0'
- mock_netmask.return_value = '255.255.255.10'
- mock_cluster_config.return_value = {'vip': '10.0.5.100',
- 'ha-bindiface': 'bnd0',
- 'ha-mcastport': 5802}
-
- hooks.hooks.execute(['hooks/ha-relation-joined'])
- self.assertEquals(self.relation_set.call_count, 2)
-
- exp_resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central'),
- 'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
- }
- exp_resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': ('params amqp_server_port="5671" '
- 'op monitor interval="30s"'),
- 'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
- 'cidr_netmask="255.255.255.10" '
- 'nic="eth0"')
- }
- exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
- call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
- call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
- corosync_bindiface='bnd0',
- corosync_mcastport=5802,
- resources=exp_resources,
- resource_params=exp_resource_params,
- clones=exp_clones)
- self.relation_set.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'keystone_joined')
- def test_ha_changed_not_clustered(self, mock_keystone_joined, mock_config):
- self.relation_get.return_value = None
- hooks.hooks.execute(['hooks/ha-relation-changed'])
- self.assertEquals(mock_keystone_joined.call_count, 0)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'keystone_joined')
- def test_ha_changed_clustered(self, mock_keystone_joined, mock_config):
- self.relation_get.return_value = 'yes'
- self.relation_ids.return_value = ['identity-service/0']
- hooks.hooks.execute(['hooks/ha-relation-changed'])
- self.assertEquals(mock_keystone_joined.call_count, 1)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py
deleted file mode 100644
index 8bfe59e..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from mock import patch, call, MagicMock
-
-import ceilometer_utils as utils
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'get_os_codename_package',
- 'get_os_codename_install_source',
- 'configure_installation_source',
- 'templating',
- 'LoggingConfigContext',
- 'MongoDBContext',
- 'CeilometerContext',
- 'config',
- 'log',
- 'apt_install',
- 'apt_update',
- 'apt_upgrade',
-]
-
-
-class CeilometerUtilsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerUtilsTest, self).setUp(utils, TO_PATCH)
- self.config.side_effect = self.test_config.get
-
- def tearDown(self):
- super(CeilometerUtilsTest, self).tearDown()
-
- def test_register_configs(self):
- configs = utils.register_configs()
- calls = []
- for conf in utils.CONFIG_FILES:
- calls.append(call(conf,
- utils.CONFIG_FILES[conf]['hook_contexts']))
- configs.register.assert_has_calls(calls, any_order=True)
-
- def test_ceilometer_release_services(self):
- """Ensure that icehouse specific services are identified"""
- self.get_os_codename_install_source.return_value = 'icehouse'
- self.assertEqual(['ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'],
- utils.ceilometer_release_services())
-
- def test_ceilometer_release_services_mitaka(self):
- """Ensure that mitaka specific services are identified"""
- self.get_os_codename_install_source.return_value = 'mitaka'
- self.assertEqual(['ceilometer-agent-notification'],
- utils.ceilometer_release_services())
-
- def test_restart_map(self):
- """Ensure that alarming services are present for < OpenStack Mitaka"""
- self.get_os_codename_install_source.return_value = 'icehouse'
- restart_map = utils.restart_map()
- self.assertEquals(
- restart_map,
- {'/etc/ceilometer/ceilometer.conf': [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'],
- '/etc/haproxy/haproxy.cfg': ['haproxy'],
- "/etc/apache2/sites-available/openstack_https_frontend": [
- 'apache2'],
- "/etc/apache2/sites-available/openstack_https_frontend.conf": [
- 'apache2']
- }
- )
-
- def test_restart_map_mitaka(self):
- """Ensure that alarming services are missing for OpenStack Mitaka"""
- self.get_os_codename_install_source.return_value = 'mitaka'
- restart_map = utils.restart_map()
- self.assertEquals(
- restart_map,
- {'/etc/ceilometer/ceilometer.conf': [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification'],
- '/etc/haproxy/haproxy.cfg': ['haproxy'],
- "/etc/apache2/sites-available/openstack_https_frontend": [
- 'apache2'],
- "/etc/apache2/sites-available/openstack_https_frontend.conf": [
- 'apache2']
- }
- )
-
- def test_get_ceilometer_conf(self):
- class TestContext():
-
- def __call__(self):
- return {'data': 'test'}
- with patch.dict(utils.CONFIG_FILES,
- {'/etc/ceilometer/ceilometer.conf': {
- 'hook_contexts': [TestContext()]
- }}):
- self.assertTrue(utils.get_ceilometer_context(),
- {'data': 'test'})
-
- def test_do_openstack_upgrade(self):
- self.config.side_effect = self.test_config.get
- self.test_config.set('openstack-origin', 'cloud:trusty-kilo')
- self.get_os_codename_install_source.return_value = 'kilo'
- configs = MagicMock()
- utils.do_openstack_upgrade(configs)
- configs.set_release.assert_called_with(openstack_release='kilo')
- self.assertTrue(self.log.called)
- self.apt_update.assert_called_with(fatal=True)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- self.apt_install.assert_called_with(
- packages=utils.CEILOMETER_BASE_PACKAGES + utils.ICEHOUSE_PACKAGES,
- options=dpkg_opts, fatal=True
- )
- self.configure_installation_source.assert_called_with(
- 'cloud:trusty-kilo'
- )
-
- def test_get_packages_icehouse(self):
- self.get_os_codename_install_source.return_value = 'icehouse'
- self.assertEqual(utils.get_packages(),
- utils.CEILOMETER_BASE_PACKAGES +
- utils.ICEHOUSE_PACKAGES)
-
- def test_get_packages_mitaka(self):
- self.get_os_codename_install_source.return_value = 'mitaka'
- self.assertEqual(utils.get_packages(),
- utils.CEILOMETER_BASE_PACKAGES +
- utils.MITAKA_PACKAGES)
-
- def test_assess_status(self):
- with patch.object(utils, 'assess_status_func') as asf:
- callee = MagicMock()
- asf.return_value = callee
- utils.assess_status('test-config')
- asf.assert_called_once_with('test-config')
- callee.assert_called_once_with()
-
- @patch.object(utils, 'REQUIRED_INTERFACES')
- @patch.object(utils, 'services')
- @patch.object(utils, 'determine_ports')
- @patch.object(utils, 'make_assess_status_func')
- def test_assess_status_func(self,
- make_assess_status_func,
- determine_ports,
- services,
- REQUIRED_INTERFACES):
- services.return_value = 's1'
- determine_ports.return_value = 'p1'
- utils.assess_status_func('test-config')
- make_assess_status_func.assert_called_once_with(
- 'test-config', REQUIRED_INTERFACES, services='s1', ports='p1')
-
- def test_pause_unit_helper(self):
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.pause_unit_helper('random-config')
- prh.assert_called_once_with(utils.pause_unit, 'random-config')
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.resume_unit_helper('random-config')
- prh.assert_called_once_with(utils.resume_unit, 'random-config')
-
- @patch.object(utils, 'services')
- @patch.object(utils, 'determine_ports')
- def test_pause_resume_helper(self, determine_ports, services):
- f = MagicMock()
- services.return_value = 's1'
- determine_ports.return_value = 'p1'
- with patch.object(utils, 'assess_status_func') as asf:
- asf.return_value = 'assessor'
- utils._pause_resume_helper(f, 'some-config')
- asf.assert_called_once_with('some-config')
- f.assert_called_once_with('assessor', services='s1', ports='p1')
diff --git a/charms/trusty/ceilometer/unit_tests/test_utils.py b/charms/trusty/ceilometer/unit_tests/test_utils.py
deleted file mode 100644
index e90679e..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_utils.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import logging
-import unittest
-import os
-import yaml
-import io
-
-from contextlib import contextmanager
-from mock import patch
-
-
-@contextmanager
-def mock_open(filename, contents=None):
- ''' Slightly simpler mock of open to return contents for filename '''
- def mock_file(*args):
- if args[0] == filename:
- return io.StringIO(contents)
- else:
- return open(*args)
- with patch('__builtin__.open', mock_file):
- yield
-
-
-def load_config():
- '''
- Walk backwords from __file__ looking for config.yaml, load and return the
- 'options' section'
- '''
- config = None
- f = __file__
- while config is None:
- d = os.path.dirname(f)
- if os.path.isfile(os.path.join(d, 'config.yaml')):
- config = os.path.join(d, 'config.yaml')
- break
- f = d
-
- if not config:
- logging.error('Could not find config.yaml in any parent directory '
- 'of %s. ' % file)
- raise Exception
-
- return yaml.safe_load(open(config).read())['options']
-
-
-def get_default_config():
- '''
- Load default charm config from config.yaml return as a dict.
- If no default is set in config.yaml, its value is None.
- '''
- default_config = {}
- config = load_config()
- for k, v in config.iteritems():
- if 'default' in v:
- default_config[k] = v['default']
- else:
- default_config[k] = None
- return default_config
-
-
-class CharmTestCase(unittest.TestCase):
- def setUp(self, obj, patches):
- super(CharmTestCase, self).setUp()
- self.patches = patches
- self.obj = obj
- self.test_config = TestConfig()
- self.test_relation = TestRelation()
- self.patch_all()
-
- def patch(self, method):
- _m = patch.object(self.obj, method)
- mock = _m.start()
- self.addCleanup(_m.stop)
- return mock
-
- def patch_all(self):
- for method in self.patches:
- setattr(self, method, self.patch(method))
-
-
-class TestConfig(object):
- def __init__(self):
- self.config = get_default_config()
-
- def get(self, attr):
- try:
- return self.config[attr]
- except KeyError:
- return None
-
- def get_all(self):
- return self.config
-
- def set(self, attr, value):
- if attr not in self.config:
- raise KeyError
- self.config[attr] = value
-
-
-class TestRelation(object):
- def __init__(self, relation_data={}):
- self.relation_data = relation_data
-
- def set(self, relation_data):
- self.relation_data = relation_data
-
- def get(self, attr=None, unit=None, rid=None):
- if attr is None:
- return self.relation_data
- elif attr in self.relation_data:
- return self.relation_data[attr]
- return None