aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStuart Mackie <wsmackie@juniper.net>2016-11-22 14:00:39 -0800
committerStuart Mackie <wsmackie@juniper.net>2016-11-22 14:00:39 -0800
commit5de4c9f0c126b984da72128a3ca084eda6b4087a (patch)
tree0be55aa0809cc395e45baeae63db660b4e72fe83
parentf02da72993eb8e5a34ed049bad442c6d6db4701a (diff)
Add local cassandra charm from Tony Liu's machine
Change-Id: I56478233f7498861f95a0c12be983f9a1307853e Signed-off-by: Stuart Mackie <wsmackie@juniper.net>
-rw-r--r--charms/trusty/cassandra/.bzr/README3
-rw-r--r--charms/trusty/cassandra/.bzr/branch-format1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/branch.conf1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/format1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/last-revision1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/tags0
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/dirstatebin0 -> 21533 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/format1
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/views0
-rw-r--r--charms/trusty/cassandra/.bzr/repository/format1
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cixbin0 -> 26592 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iixbin0 -> 27223 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rixbin0 -> 27284 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.sixbin0 -> 14814 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tixbin0 -> 79199 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/pack-namesbin0 -> 149 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.packbin0 -> 641193 bytes
-rw-r--r--charms/trusty/cassandra/.bzrignore9
-rw-r--r--charms/trusty/cassandra/Makefile224
-rw-r--r--charms/trusty/cassandra/README.md179
-rw-r--r--charms/trusty/cassandra/README.storage41
-rw-r--r--charms/trusty/cassandra/charm-helpers.yaml27
-rw-r--r--charms/trusty/cassandra/config.yaml316
-rw-r--r--charms/trusty/cassandra/copyright19
-rw-r--r--charms/trusty/cassandra/files/check_cassandra_heap.sh36
-rw-r--r--charms/trusty/cassandra/hooks/actions.py990
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py126
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py398
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py40
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/coordinator.py607
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py1026
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/host.py695
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py71
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py292
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/templating.py81
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py68
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py70
-rwxr-xr-xcharms/trusty/cassandra/hooks/cluster-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/cluster-relation-departed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/config-changed20
-rw-r--r--charms/trusty/cassandra/hooks/coordinator.py35
-rwxr-xr-xcharms/trusty/cassandra/hooks/data-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/data-relation-departed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/database-admin-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/database-relation-changed20
-rw-r--r--charms/trusty/cassandra/hooks/definitions.py127
-rw-r--r--charms/trusty/cassandra/hooks/helpers.py1084
-rw-r--r--charms/trusty/cassandra/hooks/hooks.py61
-rwxr-xr-xcharms/trusty/cassandra/hooks/install20
-rwxr-xr-xcharms/trusty/cassandra/hooks/leader-elected20
-rwxr-xr-xcharms/trusty/cassandra/hooks/leader-settings-changed20
-rw-r--r--charms/trusty/cassandra/hooks/loglog.py42
-rwxr-xr-xcharms/trusty/cassandra/hooks/nrpe-external-master-relation-changed20
-rw-r--r--charms/trusty/cassandra/hooks/relations.py139
-rwxr-xr-xcharms/trusty/cassandra/hooks/stop20
-rwxr-xr-xcharms/trusty/cassandra/hooks/upgrade-charm20
-rw-r--r--charms/trusty/cassandra/icon.svg650
-rw-r--r--charms/trusty/cassandra/lib/apache.key53
-rw-r--r--charms/trusty/cassandra/lib/datastax.key49
-rwxr-xr-xcharms/trusty/cassandra/lib/juju-deployer-wrapper.py15
-rwxr-xr-xcharms/trusty/cassandra/lib/testcharms/empty/hooks/install6
-rw-r--r--charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml11
-rw-r--r--charms/trusty/cassandra/metadata.yaml38
-rwxr-xr-xcharms/trusty/cassandra/scripts/volume-common.sh220
-rw-r--r--charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl6
-rw-r--r--charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl6
-rw-r--r--charms/trusty/cassandra/templates/nrpe_service_file.tmpl10
-rw-r--r--charms/trusty/cassandra/testing/__init__.py15
-rw-r--r--charms/trusty/cassandra/testing/amuletfixture.py234
-rw-r--r--charms/trusty/cassandra/testing/mocks.py182
-rw-r--r--charms/trusty/cassandra/tests/__init__.py15
-rwxr-xr-xcharms/trusty/cassandra/tests/base.py43
-rwxr-xr-xcharms/trusty/cassandra/tests/test_actions.py1156
-rwxr-xr-xcharms/trusty/cassandra/tests/test_definitions.py104
-rwxr-xr-xcharms/trusty/cassandra/tests/test_helpers.py1466
-rwxr-xr-xcharms/trusty/cassandra/tests/test_integration.py620
-rw-r--r--charms/trusty/cassandra/tests/tests.yaml15
97 files changed, 14552 insertions, 0 deletions
diff --git a/charms/trusty/cassandra/.bzr/README b/charms/trusty/cassandra/.bzr/README
new file mode 100644
index 0000000..f82dc1c
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/README
@@ -0,0 +1,3 @@
+This is a Bazaar control directory.
+Do not change any files in this directory.
+See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/cassandra/.bzr/branch-format b/charms/trusty/cassandra/.bzr/branch-format
new file mode 100644
index 0000000..9eb09b7
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/branch-format
@@ -0,0 +1 @@
+Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/cassandra/.bzr/branch/branch.conf b/charms/trusty/cassandra/.bzr/branch/branch.conf
new file mode 100644
index 0000000..efb4cac
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/branch/branch.conf
@@ -0,0 +1 @@
+parent_location = bzr+ssh://bazaar.launchpad.net/~charmers/charms/trusty/cassandra/trunk/
diff --git a/charms/trusty/cassandra/.bzr/branch/format b/charms/trusty/cassandra/.bzr/branch/format
new file mode 100644
index 0000000..dc392f4
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/branch/format
@@ -0,0 +1 @@
+Bazaar Branch Format 7 (needs bzr 1.6)
diff --git a/charms/trusty/cassandra/.bzr/branch/last-revision b/charms/trusty/cassandra/.bzr/branch/last-revision
new file mode 100644
index 0000000..4f71a92
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/branch/last-revision
@@ -0,0 +1 @@
+379 stuart.bishop@canonical.com-20160701064342-way4zlx1v8mg8902
diff --git a/charms/trusty/cassandra/.bzr/branch/tags b/charms/trusty/cassandra/.bzr/branch/tags
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/branch/tags
diff --git a/charms/trusty/cassandra/.bzr/checkout/conflicts b/charms/trusty/cassandra/.bzr/checkout/conflicts
new file mode 100644
index 0000000..0dc2d3a
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/checkout/conflicts
@@ -0,0 +1 @@
+BZR conflict list format 1
diff --git a/charms/trusty/cassandra/.bzr/checkout/dirstate b/charms/trusty/cassandra/.bzr/checkout/dirstate
new file mode 100644
index 0000000..61566d1
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/checkout/dirstate
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/checkout/format b/charms/trusty/cassandra/.bzr/checkout/format
new file mode 100644
index 0000000..e0261c7
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/checkout/format
@@ -0,0 +1 @@
+Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/cassandra/.bzr/checkout/views b/charms/trusty/cassandra/.bzr/checkout/views
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/checkout/views
diff --git a/charms/trusty/cassandra/.bzr/repository/format b/charms/trusty/cassandra/.bzr/repository/format
new file mode 100644
index 0000000..b200528
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/format
@@ -0,0 +1 @@
+Bazaar repository format 2a (needs bzr 1.16 or later)
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix
new file mode 100644
index 0000000..62a8e1e
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix
new file mode 100644
index 0000000..adccde0
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix
new file mode 100644
index 0000000..3a4e8f1
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six
new file mode 100644
index 0000000..d6e0f15
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix
new file mode 100644
index 0000000..ba00536
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/pack-names b/charms/trusty/cassandra/.bzr/repository/pack-names
new file mode 100644
index 0000000..f3fd19b
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/pack-names
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack b/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack
new file mode 100644
index 0000000..c9f37e0
--- /dev/null
+++ b/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack
Binary files differ
diff --git a/charms/trusty/cassandra/.bzrignore b/charms/trusty/cassandra/.bzrignore
new file mode 100644
index 0000000..17b1268
--- /dev/null
+++ b/charms/trusty/cassandra/.bzrignore
@@ -0,0 +1,9 @@
+revision
+*.pyc
+.stamp-*
+.venv2
+.venv3
+tests/.venv2
+tests/.venv3
+.coverage
+coverage
diff --git a/charms/trusty/cassandra/Makefile b/charms/trusty/cassandra/Makefile
new file mode 100644
index 0000000..571af56
--- /dev/null
+++ b/charms/trusty/cassandra/Makefile
@@ -0,0 +1,224 @@
+#!/usr/bin/make -f
+
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+JUJU = juju
+
+default:
+ @echo Missing target
+ @echo 'Usage: make [ lint | unittest | test | clean | sync ]'
+ env
+
+# Only trusty supported, but xenial expected soon.
+SERIES := $(shell $(JUJU) get-environment default-series)
+
+HOST_SERIES := $(shell lsb_release -sc)
+ifeq ($(HOST_SERIES),trusty)
+ PYVER := 3.4
+else
+ PYVER := 3.5
+endif
+
+
+# /!\ Ensure that errors early in pipes cause failures, rather than
+# overridden by the last stage of the pipe. cf. 'test.py | ts'
+SHELL := /bin/bash
+export SHELLOPTS:=errexit:pipefail
+
+
+# Calculate the CHARM_DIR (the directory this Makefile is in)
+THIS_MAKEFILE_PATH:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
+CHARM_DIR:=$(shell cd $(dir $(THIS_MAKEFILE_PATH));pwd)
+VENV3:=$(CHARM_DIR)/.venv3
+
+# Set the PATH so the correct tools are found.
+export PATH:=$(VENV3)/bin:$(PATH)
+
+SITE_PACKAGES=$(wildcard $(VENV3)/lib/python*/site-packages)
+
+PIP=.venv3/bin/pip$(PYVER) -q
+NOSETESTS=.venv3/bin/nosetests-3.4 -sv # Yes, even with 3.5
+
+# Set pipefail so we can get sane error codes while tagging test output
+# with ts(1)
+SHELL=bash -o pipefail
+
+deps: packages venv3
+
+lint: deps
+ date
+ free --human
+ charm proof $(CHARM_DIR)
+ flake8 \
+ --ignore=E402,E265 \
+ --exclude=charmhelpers,.venv2,.venv3 hooks tests testing
+ @echo OK: Lint free `date`
+
+unittest: lint
+ $(NOSETESTS) \
+ tests.test_actions --cover-package=actions \
+ tests.test_helpers --cover-package=helpers \
+ tests.test_definitions --cover-package=definitions \
+ --with-coverage --cover-branches
+ @echo OK: Unit tests pass `date`
+
+test: unittest
+ AMULET_TIMEOUT=3600 \
+ $(NOSETESTS) tests.test_integration
+
+ftest: unittest Test1UnitDeployment
+Test1UnitDeployment: deps
+ date
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:Test1UnitDeployment 2>&1 | ts
+
+20test: unittest Test20Deployment
+Test20Deployment: deps
+ date
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:Test20Deployment 2>&1 | ts
+
+21test: unittest Test21Deployment
+Test21Deployment: deps
+ date
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:Test21Deployment 2>&1 | ts
+
+30test: unittest Test30Deployment
+Test30Deployment: deps
+ date
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:Test30Deployment 2>&1 | ts
+
+3test: unittest Test3UnitDeployment
+Test3UnitDeployment: deps
+ date
+ AMULET_TIMEOUT=7200 \
+ $(NOSETESTS) tests.test_integration:Test3UnitDeployment 2>&1 | ts
+
+authtest: unittest TestAllowAllAuthenticatorDeployment
+TestAllowAllAuthenticatorDeployment: deps
+ date
+ AMULET_TIMEOUT=7200 \
+ $(NOSETESTS) \
+ tests.test_integration:TestAllowAllAuthenticatorDeployment 2>&1 | ts
+
+# Place a copy of the Oracle Java SE 7 Server Runtime tarball in ./lib
+# to run these tests.
+jretest: unittest
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:TestOracleJREDeployment 2>&1 | ts
+
+# You need the Oracle JRE (per jretest) and set the DSE_SOURCE environment
+# variable for this to work:
+# DSE_SOURCE="deb http://un:pw@debian.datastax.com/enterprise stable main"
+# You will also need a cache like squid-deb-proxy and have tweaked it to
+# cache the authenticated files, or the tests will likely timeout waiting
+# for huge downloads to complete. Alternatively, mirror the DataStax
+# packages into your own private archive.
+dsetest: unittest
+ AMULET_TIMEOUT=5400 \
+ $(NOSETESTS) tests.test_integration:TestDSEDeployment 2>&1 | ts
+
+coverage: lint
+ $(NOSETESTS) \
+ tests.test_actions --cover-package=actions \
+ tests.test_helpers --cover-package=helpers \
+ tests.test_definitions --cover-package=definitions \
+ --with-coverage --cover-branches \
+ --cover-html --cover-html-dir=coverage \
+ --cover-min-percentage=100 || \
+ (gnome-open coverage/index.html; false)
+
+clean:
+ rm -rf .venv? tests/.venv? .stamp-* coverage .coverage
+ find . -name __pycache__ -type d | xargs rm -rf
+
+
+# Attempt to diagnose environment for test failures.
+debug:
+ -which virtualenv
+ -which python
+ -which python2
+ -which python3
+ -which pip
+ -which pip3
+ -head -1 `which virtualenv || echo nothere`
+ -python -c 'import sys; print(sys.version); print(sys.path);'
+ -python2 -c 'import sys; print(sys.version); print(sys.path);'
+ -python3 -c 'import sys; print(sys.version); print(sys.path);'
+ -env
+
+
+packages: .stamp-packages
+.stamp-packages:
+ # Install bootstrap debs, and Python packages used by the charm
+ # to ensure versions match.
+ sudo add-apt-repository -y ppa:stub/juju
+ sudo add-apt-repository -y ppa:stub/cassandra
+ sudo apt-get update
+ sudo apt-get install -y \
+ python3 python3-pip python3-apt python3-dev python-virtualenv \
+ charm-tools build-essential libev4 libev-dev libffi-dev \
+ netcat python3-jinja2 juju-wait moreutils \
+ python3-cassandra python3-bcrypt
+ touch .stamp-packages
+
+venv3: packages .stamp-venv3
+.stamp-venv3:
+ # Build a Python virtualenv to run our tests.
+ virtualenv -p python3 --system-site-packages ${VENV3}
+
+ # Create a .pth so our tests can locate everything without
+ # sys.path hacks.
+ (echo ${CHARM_DIR}/hooks; echo ${CHARM_DIR}) \
+ > ${VENV3}/lib/python${PYVER}/site-packages/tests.pth
+
+ echo 'pip: ' `which pip`
+
+ # Pip install packages needed by the test suite but not used
+ # by the charm.
+ $(PIP) install bcrypt cassandra-driver blist
+ $(PIP) install --upgrade -I nose flake8
+ $(PIP) install --upgrade coverage amulet mock juju-deployer juju-wait
+
+ echo 'nosetests:' `which nosetests`
+ echo 'flake8:' `which flake8`
+
+ # Create a link for test shebang lines.
+ (cd tests && ln -s ${VENV3} .venv3)
+
+ touch .stamp-venv3
+
+venv2: packages .stamp-venv2
+.stamp-venv2:
+ virtualenv -p python2.7 --system-site-packages .venv2
+ .venv2/bin/pip install -q bundletester
+ touch .stamp-venv2
+
+bundletest: venv2
+ .venv2/bin/bundletester
+
+sync:
+ @bzr cat \
+ lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+ > .charm_helpers_sync.py
+ @python .charm_helpers_sync.py -c charm-helpers.yaml
+ #@python .charm_helpers_sync.py \
+ # -c lib/testcharms/testclient/charm-helpers.yaml
+ @rm .charm_helpers_sync.py
diff --git a/charms/trusty/cassandra/README.md b/charms/trusty/cassandra/README.md
new file mode 100644
index 0000000..00784cc
--- /dev/null
+++ b/charms/trusty/cassandra/README.md
@@ -0,0 +1,179 @@
+# Overview
+
+The Apache Cassandra database is the right choice when you need scalability
+and high availability without compromising performance. Linear scalability
+and proven fault-tolerance on commodity hardware or cloud infrastructure
+make it the perfect platform for mission-critical data. Cassandra's support
+for replicating across multiple datacenters is best-in-class, providing lower
+latency for your users and the peace of mind of knowing that you can survive
+regional outages.
+
+See [cassandra.apache.org](http://cassandra.apache.org) for more information.
+
+
+# Editions
+
+This charm supports Apache Cassandra 2.0, 2.1, 2.2 & 3.0, and
+Datastax Enterprise 4.7 & 4.8. The default is Apache Cassandra 2.2.
+
+To use Apache Cassandra 2.0, specify the Apache Cassandra 2.0 archive source
+in the `install_sources` config setting when deploying.
+
+To use Datastax Enterprise, set the `edition` config setting to `dse`
+and the Datastax Enterprise archive URL in `install_sources` (including your
+username and password).
+
+
+# Deployment
+
+Cassandra deployments are relatively simple in that they consist of a set of
+Cassandra nodes which seed from each other to create a ring of servers:
+
+ juju deploy -n3 cs:trusty/cassandra
+
+The service units will deploy and will form a single ring.
+
+New nodes can be added to scale up:
+
+ juju add-unit cassandra
+
+
+/!\ Nodes must be manually decommissioned before dropping a unit.
+
+ juju run --unit cassandra/1 "nodetool decommission"
+ # Wait until Mode is DECOMMISSIONED
+ juju run --unit cassandra/1 "nodetool netstats"
+ juju remove-unit cassandra/1
+
+It is recommended to deploy at least 3 nodes and configure all your
+keyspaces to have a replication factor of three. Using fewer nodes or
+neglecting to set your keyspaces' replication settings means that your
+data is at risk and availability lower, as a failed unit may take the
+only copy of data with it.
+
+Production systems will normally want to set `max_heap_size` and
+`heap_newsize` to the empty string, to enable automatic memory size
+tuning. The defaults have been chosen to be suitable for development
+environments but will perform poorly with real workloads.
+
+
+## Planning
+
+- Do not attempt to store too much data per node. If you need more space,
+ add more nodes. Most workloads work best with a capacity under 1TB
+ per node.
+
+- You need to keep 50% of your disk space free for Cassandra maintenance
+ operations. If you expect your nodes to hold 500GB of data each, you
+ will need a 1TB partition. Using non-default compaction such as
+ LeveledCompactionStrategy can lower this waste.
+
+- Much more information can be found in the [Cassandra 2.2 documentation](http://docs.datastax.com/en/cassandra/2.2/cassandra/planning/planPlanningAbout.html)
+
+
+## Network Access
+
+The default Cassandra packages are installed from the apache.org
+archive. To avoid this download, place a copy of the packages in a local
+archive and specify its location in the `install_sources` configuration
+option. The signing key is automatically added.
+
+When using DataStax Enterprise, you need to specify the archive location
+containing the DataStax Enterprise .deb packages in the
+`install_sources` configuration item, and the signing key in the
+`install_keys` configuration item. Place the DataStax packages in a
+local archive to avoid downloading from datastax.com.
+
+
+## Oracle Java SE
+
+Cassandra recommends using Oracle Java SE 8. Unfortunately, this
+software is accessible only after accepting Oracle's click-through
+license making deployments using it much more cumbersome. You will need
+to download the Oracle Java SE 8 Server Runtime for Linux, and place the
+tarball at a URL accessible to your deployed units. The config item
+`private_jre_url` needs to be set to this URL.
+
+
+# Usage
+
+To relate the Cassandra charm to a service that understands how to talk to
+Cassandra using Thrift or the native Cassandra protocol::
+
+ juju deploy cs:service-that-needs-cassandra
+ juju add-relation service-that-needs-cassandra cassandra:database
+
+
+Alternatively, if you require a superuser connection, use the
+`database-admin` relation instead of `database`::
+
+ juju deploy cs:admin-service
+ juju add-relation admin-service cassandra:database-admin
+
+
+Client charms need to provide nothing. The Cassandra service publishes the
+following connection settings and cluster information on the client's relation:
+
+`username` and `password`:
+
+ Authentication credentials. The cluster is configured to use
+ the standard PasswordAuthenticator authentication provider, rather
+ than the insecure default. You can use different credentials
+ if you wish, using an account created through some other mechanism.
+
+`host`:
+
+ IP address to connect to.
+
+`native_transport_port`:
+
+ Port for drivers and tools using the newer native protocol.
+
+`rpc_port`:
+
+ Port for drivers and tools using the legacy Thrift protocol.
+
+`cluster_name`:
+
+ The cluster name. A client service may be related to several
+ Cassandra services, and this setting may be used to tell which
+ services belong to which cluster.
+
+`datacenter` and `rack`:
+
+ The datacenter and rack units in this service belong to. Required for
+ setting keyspace replication correctly.
+
+The cluster is configured to use the recommended 'snitch'
+(GossipingPropertyFileSnitch), so you will need to configure replication of
+your keyspaces using the NetworkTopologyStrategy replica placement strategy.
+For example, using the default datacenter named 'juju':
+
+ CREATE KEYSPACE IF NOT EXISTS mydata WITH REPLICATION =
+ { 'class': 'NetworkTopologyStrategy', 'juju': 3};
+
+
+Although authentication is configured using the standard
+PasswordAuthentication, by default no authorization is configured
+and the provided credentials will have access to all data on the cluster.
+For more granular permissions, you will need to set the authorizer
+in the service configuration to CassandraAuthorizer and manually grant
+permissions to the users.
+
+
+# Known Limitations and Issues
+
+This is the 'trusty' charm. Upgrade from the 'precise' charm is not supported.
+
+The `system_auth` keyspace replication factor is automatically increased
+but not decreased. If you have a service with three or more units and
+decommission enough nodes to drop below three, you will need to manually
+update the `system_auth` keyspace replication settings.
+
+
+# Contact Information
+
+## Cassandra
+
+- [Apache Cassandra homepage](http://cassandra.apache.org/)
+- [Cassandra Getting Started](http://wiki.apache.org/cassandra/GettingStarted)
diff --git a/charms/trusty/cassandra/README.storage b/charms/trusty/cassandra/README.storage
new file mode 100644
index 0000000..4a71179
--- /dev/null
+++ b/charms/trusty/cassandra/README.storage
@@ -0,0 +1,41 @@
+= Persistent storage =
+
+/!\ Unfortunately, per Bug #1334956, the storage and block-storage-broker
+ charms are not yet available in the charm store for trusty so
+ this documentation does not work as written.
+
+The cassandra charm takes advantage of the storage subordinate charm
+and the block-storage-broker charm. With these two charms cassandra can
+either request new volumes are created or use existing volumes.
+
+For requesting new volume creation, just set external_volume_mount in
+the cassandra charm and root, provider and volume_size for the storage
+subordinate charm.
+
+If using existing volumes, also set the volume_map for the storage
+subordinate charm.
+
+
+Example using existing volumes:
+
+juju deploy -n 3 cassandra
+juju set cassandra external_volume_mount="/srv/data"
+
+juju deploy storage
+juju set storage
+ root=/srv/data
+ provider=block-storage-broker
+ volume_size=10
+ # Volume map needed for existing storage
+ volume_map="{cassandra/0: e09be717-384b-43e3-b06a-3a68b5a2252d,
+ cassandra/1: ebd35228-6972-4e22-86a8-37483581154a,
+ cassandra/2: 9f02b67b-72da-4e22-98ee-10a95c1b298d}"
+
+juju deploy block-storage-broker
+
+juju add-relation storage cassandra
+juju add-relation storage block-storage-broker
+
+
+See the READMEs for the storage subordinate charm and the
+block-storage-broker charm for more detail on persistent storage.
diff --git a/charms/trusty/cassandra/charm-helpers.yaml b/charms/trusty/cassandra/charm-helpers.yaml
new file mode 100644
index 0000000..da3786b
--- /dev/null
+++ b/charms/trusty/cassandra/charm-helpers.yaml
@@ -0,0 +1,27 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+destination: hooks/charmhelpers
+#branch: lp:charm-helpers
+branch: lp:~stub/charm-helpers/integration
+include:
+ - coordinator
+ - core
+ - fetch
+ - contrib.charmsupport
+ - contrib.templating.jinja
+ - contrib.network.ufw
+ - contrib.benchmark
diff --git a/charms/trusty/cassandra/config.yaml b/charms/trusty/cassandra/config.yaml
new file mode 100644
index 0000000..6ec68a5
--- /dev/null
+++ b/charms/trusty/cassandra/config.yaml
@@ -0,0 +1,316 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+options:
+ # Install and upgrade charm related options
+ extra_packages:
+ type: string
+ default: ""
+ description: >
+ Extra packages to install. A space delimited list of packages.
+ package_status:
+ default: "install"
+ type: string
+ description: >
+ The status of service-affecting packages will be set to this
+ value in the dpkg database. Useful valid values are "install"
+ and "hold".
+ install_sources:
+ type: string
+ description: >
+ charm-helpers standard listing of package install sources.
+ If you are using Datastax Enterprise, you will need to
+      override one of the defaults with your own username and password.
+ default: |
+ - deb http://www.apache.org/dist/cassandra/debian 22x main
+ - ppa:openjdk-r/ppa # For OpenJDK 8
+ - ppa:stub/cassandra # For Python driver
+ # - deb http://debian.datastax.com/community stable main
+ # DSE requires you to register and add your username/password here.
+ # - deb http://un:pw@debian.datastax.com/enterprise stable main
+ install_keys:
+ type: string
+ description: >
+ charm-helpers standard listing of package install source
+ signing keys, corresponding to install_sources.
+ default: |
+      - null # Apache package signing key added automatically.
+      - null # openjdk-r PPA signing key added automatically.
+      - null # ppa:stub/cassandra signing key added automatically.
+ # - null # DataStack package signing key added automatically.
+ http_proxy:
+ type: string
+ description: >
+ Value for the http_proxy and https_proxy environment variables.
+ This causes pip(1) and other tools to perform downloads via
+ the proxy server. eg. http://squid.dc1.lan:8080
+ default: ""
+ # TODO: Add JNA
+ # private_jna_url:
+ # type: string
+ # default: ""
+ # description: >
+ # URL for the private jna tar file. DSE requires JNA >= 3.4
+ private_jre_url:
+ type: string
+ default: ""
+ description: >
+ URL for the private jre tar file. DSE requires
+ Oracle Java SE 8 Server JRE (eg. server-jre-8u60-linux-x64.tar.gz).
+ edition:
+ type: string
+ default: community
+ description: >
+ One of 'community' or 'dse'. 'community' uses the
+ Apache Cassandra packages. 'dse' is for DataStax
+ Enterprise. Selecting 'dse' overrides the jvm setting.
+ jre:
+ type: string
+ default: openjdk
+ description: >
+ Which Java runtime environment to use. May be 'openjdk' or
+ 'oracle'.
+
+ # Storage configuration
+ wait_for_storage_broker:
+ type: boolean
+ default: False
+ description: >
+ Do not start the service before external storage has been
+ mounted using the block storage broker relation. If you do
+ not set this and you relate the service to the storage
+ broker, then your service will have started up using local
+ disk, and later torn down and rebuilt when the external
+ storage became available.
+ data_file_directories:
+ type: string
+ default: data
+ description: >
+ Space delimited data directories. Use multiple data
+ directories to split data over multiple physical hardware
+ drive partitions. Paths are relative to /var/lib/cassandra
+ or the block storage broker external mount point.
+ commitlog_directory:
+ type: string
+ default: commitlog
+ description: >
+ Commit log directory. The path is relative to
+ /var/lib/cassandra or the block storage broker external
+ mount point.
+ saved_caches_directory:
+ type: string
+ default: saved_caches
+ description: >
+ Saved caches directory. The path is relative to
+ /var/lib/cassandra or the block storage broker external
+ mount point.
+ io_scheduler:
+ type: string
+ default: "noop"
+ description: >
+ Set kernel io scheduler for persistent storage.
+ https://www.kernel.org/doc/Documentation/block/switching-sched.txt
+
+## units-to-update:
+## type: string
+## default: "all"
+## description: >
+## Comma separated list of unit numbers to update (i.e. modify
+## /etc setup and trigger cassandra restart on config-change or
+## upgrade-charm), or "all".
+
+ # nrpe-external-master relation related options
+ nagios_context:
+ default: "juju"
+ type: string
+ description: |
+ Used by the nrpe subordinate charms.
+ A string that will be prepended to instance name to set the host name
+ in nagios. So for instance the hostname would be something like:
+ juju-myservice-0
+ If you're running multiple environments with the same services in them
+ this allows you to differentiate between them.
+ nagios_servicegroups:
+ default: ""
+ type: string
+ description: >
+ A comma-separated list of nagios servicegroups.
+ If left empty, the nagios_context will be used as the servicegroup
+ nagios_heapchk_warn_pct:
+ default: 80
+ type: int
+ description: >
+ The pct of heap used to trigger a nagios warning
+ nagios_heapchk_crit_pct:
+ default: 90
+ type: int
+ description: >
+      The pct of heap used to trigger a nagios critical alert
+ nagios_disk_warn_pct:
+ default: 50
+ type: int
+ description: >
+ The pct of data disk used to trigger a nagios warning
+ nagios_disk_crit_pct:
+ default: 25
+ type: int
+ description: >
+      The pct of data disk used to trigger a nagios critical alert
+
+ # cassandra-env.sh related options
+ # The default tuning has been chosen to be suitable for development
+ # and test environments. The default tunings are expected to change
+ # over time.
+ max_heap_size:
+ type: string
+ default: '384M'
+ description: >
+ Total size of Java memory heap, for example 1G or 512M.
+ If you set this, you should also set heap_newsize. The
+ default is automatically tuned.
+ heap_newsize:
+ type: string
+ default: '32M'
+ description: >
+ The size of the JVM's young generation in the heap. If you
+ set this, you should also set max_heap_size. If in doubt,
+ go with 100M per physical CPU core. The default is
+ automatically tuned.
+
+ # Cassandra.yaml related options
+ cluster_name:
+ type: string
+ default: "juju"
+ description: >
+ Name of the Cassandra cluster. This is mainly used to
+ prevent machines in one logical cluster from joining
+ another. All Cassandra services you wish to cluster together
+ must have the same cluster_name. This setting cannot be changed
+ after service deployment.
+ partitioner:
+ default: Murmur3Partitioner
+ type: string
+ description: >
+ The cassandra partitioner to use. Use Murmur3Partitioner,
+ unless another is required for backwards compatibility.
+ num_tokens:
+ type: int
+ default: 256
+ description: Number of tokens per node.
+
+ # Topology of the service in the cluster.
+ datacenter:
+ type: string
+ default: "juju"
+ description: >
+ The node's datacenter used by the endpoint_snitch. e.g. "DC1".
+ It cannot be changed after service deployment.
+ rack:
+ type: string
+ default: ""
+ description: >
+ The rack used by the endpoint_snitch for all units in this
+ service. e.g. "Rack1". This cannot be changed after deployment.
+ It defaults to the service name. Cassandra will store replicated
+ data in different racks whenever possible.
+
+
+ # Networking options.
+ native_transport_port:
+ type: int
+ default: 9042
+ description: Native protocol port for native protocol clients.
+ rpc_port:
+ type: int
+ default: 9160
+ description: Thrift protocol port for legacy clients.
+ storage_port:
+ type: int
+ default: 7000
+ description: Cluster communication port
+ ssl_storage_port:
+ type: int
+ default: 7001
+ description: >
+ Cluster secure communication port. TODO: Unused. configure SSL.
+ authenticator:
+ type: string
+ default: PasswordAuthenticator
+ description: >
+ Authentication backend. Only PasswordAuthenticator and
+ AllowAllAuthenticator are supported. You should only
+ use AllowAllAuthenticator for legacy applications that
+ cannot provide authentication credentials.
+ authorizer:
+ type: string
+ default: AllowAllAuthorizer
+ description: >
+ Authorization backend, implementing IAuthorizer; used to limit
+ access/provide permissions Out of the box, Cassandra provides
+ AllowAllAuthorizer & CassandraAuthorizer
+ - AllowAllAuthorizer allows any action to any user - set it to
+ disable authorization.
+ - CassandraAuthorizer stores permissions in
+ system_auth.permissions table.
+
+
+ # Tuning options.
+ compaction_throughput_mb_per_sec:
+ type: int
+ default: 16
+ description: >
+ Throttles compaction to the given total throughput (in MB/sec)
+ across the entire system. The faster you insert data, the faster
+ you need to compact in order to keep the sstable count down,
+ but in general, setting this to 16 to 32 times the rate you
+ are inserting data is more than sufficient. Setting this to
+ 0 disables throttling. Note that this account for all types
+ of compaction, including validation compaction.
+ stream_throughput_outbound_megabits_per_sec:
+ type: int
+ default: 200
+ description: >
+ Throttles all outbound streaming file transfers on nodes to the
+ given total throughput in Mbps. This is necessary because Cassandra
+ does mostly sequential IO when streaming data during bootstrap or
+ repair, which can lead to saturating the network connection and
+ degrading rpc performance. When unset, the default is 200 Mbps
+ or 25 MB/s. 0 to disable throttling.
+
+ tombstone_warn_threshold:
+ type: int
+ default: 1000
+ description: >
+ When executing a scan, within or across a partition, we
+ need to keep the tombstones seen in memory so we can return
+ them to the coordinator, which will use them to make sure
+ other replicas also know about the deleted rows. With
+ workloads that generate a lot of tombstones, this can cause
+      performance problems and even exhaust the server heap. Adjust
+ the thresholds here if you understand the dangers and want
+ to scan more tombstones anyway.
+ tombstone_failure_threshold:
+ type: int
+ default: 100000
+ description: >
+ When executing a scan, within or across a partition, we need
+ to keep the tombstones seen in memory so we can return them
+ to the coordinator, which will use them to make sure other
+ replicas also know about the deleted rows. With workloads
+ that generate a lot of tombstones, this can cause
+      performance problems and even exhaust the server heap. Adjust
+ the thresholds here if you understand the dangers and want
+ to scan more tombstones anyway.
diff --git a/charms/trusty/cassandra/copyright b/charms/trusty/cassandra/copyright
new file mode 100644
index 0000000..7902f54
--- /dev/null
+++ b/charms/trusty/cassandra/copyright
@@ -0,0 +1,19 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2013, Canonical Ltd., All Rights Reserved.
+License: AGPL-3
+This file is part of the Cassandra charm.
+Copyright 2015 Canonical Ltd.
+.
+This program is free software: you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 3, as
+published by the Free Software Foundation.
+.
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranties of
+MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+PURPOSE. See the GNU General Public License for more details.
+.
+You should have received a copy of the GNU General Public License along
+with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/files/check_cassandra_heap.sh b/charms/trusty/cassandra/files/check_cassandra_heap.sh
new file mode 100644
index 0000000..395aa06
--- /dev/null
+++ b/charms/trusty/cassandra/files/check_cassandra_heap.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+
+# Copyright (C) 2012 Canonical Ltd.
+# Author: Liam Young
+#
+# Nagios plugin: check that Cassandra is alive and has heap space left.
+# Exit codes follow the Nagios plugin API: 0=OK, 1=WARNING, 2=CRITICAL.
+
+set -u
+
+if [[ $# -lt 3 ]]; then
+    echo "$0 <jmx-ipadress> <warnpct> <criticalpct>"
+    exit 1
+fi
+WARN_PCT=$2
+CRIT_PCT=$3
+
+NODE_INFO="$(nodetool -h $1 info 2>/dev/null)"
+if [[ $? -ne 0 ]]; then
+    echo "ERROR: Failed to connect to Cassandra"
+    exit 2
+fi
+PCT_USED=$(echo "$NODE_INFO" | awk 'BEGIN {FS=":"} $1 ~ /^Heap Memory/ {print $2}' | awk '{ printf("%i\n", $1*100/$3) }')
+USAGE_INFO="${PCT_USED}% of heap memory used"
+if [[ $PCT_USED -lt $WARN_PCT ]]; then
+    echo "OK: ${USAGE_INFO}"
+    exit 0
+elif [[ $PCT_USED -lt $CRIT_PCT ]]; then
+    echo "WARNING: ${USAGE_INFO}"
+    exit 1
+else
+    echo "CRITICAL: ${USAGE_INFO}"
+    # Bug fix: CRITICAL must exit 2 per the Nagios plugin API; the
+    # original exited 1, which Nagios treats as only a WARNING.
+    exit 2
+fi
diff --git a/charms/trusty/cassandra/hooks/actions.py b/charms/trusty/cassandra/hooks/actions.py
new file mode 100644
index 0000000..8887056
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/actions.py
@@ -0,0 +1,990 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from contextlib import closing
+import errno
+from functools import wraps
+import glob
+import os.path
+import re
+import shlex
+import socket
+import subprocess
+from textwrap import dedent
+import time
+import urllib.request
+
+from charmhelpers import fetch
+from charmhelpers.contrib.charmsupport import nrpe
+from charmhelpers.contrib.network import ufw
+from charmhelpers.contrib.templating import jinja
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.fstab import Fstab
+from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
+
+import cassandra
+
+from coordinator import coordinator
+import helpers
+import relations
+
+
+# These config keys cannot be changed after service deployment.
+UNCHANGEABLE_KEYS = set(['cluster_name', 'datacenter', 'rack', 'edition'])
+
+# If any of these config items are changed, Cassandra needs to be
+# restarted and maybe remounted.
+RESTART_REQUIRED_KEYS = set([
+    'data_file_directories',
+    'commitlog_directory',
+    'saved_caches_directory',
+    'storage_port',
+    'ssl_storage_port',
+    'rpc_port',
+    'native_transport_port',
+    'partitioner',
+    'num_tokens',
+    'max_heap_size',
+    'heap_newsize',
+    'authenticator',
+    'authorizer',
+    'compaction_throughput_mb_per_sec',
+    'stream_throughput_outbound_megabits_per_sec',
+    'tombstone_warn_threshold',
+    'tombstone_failure_threshold',
+    'jre',
+    'private_jre_url'])
+
+# NOTE(review): despite the name, ALL_CONFIG_KEYS does NOT include
+# RESTART_NOT_REQUIRED_KEYS below -- verify callers expect that.
+ALL_CONFIG_KEYS = UNCHANGEABLE_KEYS.union(RESTART_REQUIRED_KEYS)
+
+
+# All other config items. By maintaining both lists, we can detect if
+# someone forgot to update these lists when they added a new config item.
+RESTART_NOT_REQUIRED_KEYS = set([
+    'extra_packages',
+    'package_status',
+    'install_sources',
+    'install_keys',
+    'http_proxy',
+    'wait_for_storage_broker',
+    'io_scheduler',
+    'nagios_context',
+    'nagios_servicegroups',
+    'nagios_heapchk_warn_pct',
+    'nagios_heapchk_crit_pct',
+    'nagios_disk_warn_pct',
+    'nagios_disk_crit_pct'])
+
+
+def action(func):
+    '''Log and call func, stripping the undesirable servicename argument.
+    '''
+    @wraps(func)
+    def wrapper(servicename, *args, **kw):
+        # 'servicename' is accepted but discarded: func is invoked
+        # without it below.
+        if hookenv.remote_unit():
+            hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(),
+                                                      func.__name__,
+                                                      hookenv.remote_unit()))
+        else:
+            hookenv.log("** Action {}/{}".format(hookenv.hook_name(),
+                                                 func.__name__))
+        return func(*args, **kw)
+    return wrapper
+
+
+def leader_only(func):
+    '''Decorated function is only run on the leader.'''
+    @wraps(func)
+    def wrapper(*args, **kw):
+        if hookenv.is_leader():
+            return func(*args, **kw)
+        else:
+            # Non-leaders silently return None rather than raising.
+            return None
+    return wrapper
+
+
+def authentication(func):
+    '''Decorated function is skipped if authentication is disabled.'''
+    @wraps(func)
+    def wrapper(*args, **kw):
+        auth = hookenv.config()['authenticator']
+        if auth == 'PasswordAuthenticator':
+            return func(*args, **kw)
+        elif auth == 'AllowAllAuthenticator':
+            hookenv.log('Skipped. Authentication disabled.', DEBUG)
+            return None
+        # Unknown authenticator: mark the unit blocked and end the hook
+        # cleanly (exit status 0) so Juju does not mark the hook failed.
+        helpers.status_set('blocked', 'Unknown authenticator {}'.format(auth))
+        raise SystemExit(0)
+    return wrapper
+
+
+@action
+def set_proxy():
+    '''Delegate proxy setup to hooks.set_proxy().'''
+    # Imported here rather than at module level -- presumably to avoid
+    # a circular import with the hooks module; TODO confirm.
+    import hooks
+    hooks.set_proxy()
+
+
+@action
+def revert_unchangeable_config():
+    '''Undo any changes made to config keys that are fixed at deploy time.
+
+    Keys listed in UNCHANGEABLE_KEYS are restored to their previous
+    values, with an ERROR logged for each reverted key.
+    '''
+    config = hookenv.config()
+
+    # config.previous() only becomes meaningful after the install
+    # hook has run. During the first run on the unit hook, it
+    # reports everything as having None as the previous value.
+    if config._prev_dict is None:
+        return
+
+    for key in UNCHANGEABLE_KEYS:
+        if config.changed(key):
+            previous = config.previous(key)
+            hookenv.log('{} cannot be changed after service deployment. '
+                        'Using original setting {!r}'.format(key, previous),
+                        ERROR)
+            config[key] = previous
+
+
+# FOR CHARMHELPERS
+@action
+def preinstall():
+ '''Preinstallation data_ready hook.'''
+ # Only run the preinstall hooks from the actual install hook.
+ if hookenv.hook_name() == 'install':
+ # Pre-exec
+ pattern = os.path.join(hookenv.charm_dir(),
+ 'exec.d', '*', 'charm-pre-install')
+ for f in sorted(glob.glob(pattern)):
+ if os.path.isfile(f) and os.access(f, os.X_OK):
+ hookenv.log('Running preinstall hook {}'.format(f))
+ subprocess.check_call(['sh', '-c', f])
+ else:
+ hookenv.log('Ingnoring preinstall hook {}'.format(f),
+ WARNING)
+ else:
+ hookenv.log('No preinstall hooks found')
+
+
+# FOR CHARMHELPERS
+@action
+def swapoff(fstab='/etc/fstab'):
+    '''Turn off swapping in the container, permanently.'''
+    # Turn off swap in the current session
+    if helpers.is_lxc():
+        hookenv.log("In an LXC. Not touching swap.")
+        return
+    else:
+        try:
+            subprocess.check_call(['swapoff', '-a'])
+        except Exception as e:
+            hookenv.log("Got an error trying to turn off swapping. {}. "
+                        "We may be in an LXC. Exiting gracefully"
+                        "".format(e), WARNING)
+            return
+
+    # Disable swap permanently by removing every swap entry from fstab.
+    # NOTE: the 'fstab' path parameter is shadowed here by the Fstab
+    # object bound in the with statement.
+    with closing(Fstab(fstab)) as fstab:
+        while True:
+            swap_entry = fstab.get_entry_by_attr('filesystem', 'swap')
+            if swap_entry is None:
+                break
+            fstab.remove_entry(swap_entry)
+
+
+# FOR CHARMHELPERS
+@action
+def configure_sources():
+    '''Standard charmhelpers package source configuration.'''
+    config = hookenv.config()
+    # Only reconfigure when the relevant config actually changed; the
+    # True argument presumably forces an apt index update -- confirm
+    # against charm-helpers' fetch.configure_sources signature.
+    if config.changed('install_sources') or config.changed('install_keys'):
+        fetch.configure_sources(True)
+
+
+@action
+def add_implicit_package_signing_keys():
+    '''Add the Apache and DataStax signing keys shipped in the charm's
+    lib/ directory to apt's keyring.'''
+    # Rather than blindly add these keys, we should sniff
+    # config['install_sources'] for apache.org or datastax.com urls and
+    # add only the appropriate keys.
+    for key in ('apache', 'datastax'):
+        path = os.path.join(hookenv.charm_dir(), 'lib', '{}.key'.format(key))
+        subprocess.check_call(['apt-key', 'add', path],
+                              stdin=subprocess.DEVNULL)
+
+
+@action
+def reset_sysctl():
+    '''Configure sysctl settings for Cassandra'''
+    if helpers.is_lxc():
+        hookenv.log("In an LXC. Leaving sysctl unchanged.")
+    else:
+        cassandra_sysctl_file = os.path.join('/', 'etc', 'sysctl.d',
+                                             '99-cassandra.conf')
+        contents = b"vm.max_map_count = 131072\n"
+        try:
+            host.write_file(cassandra_sysctl_file, contents)
+            subprocess.check_call(['sysctl', '-p', cassandra_sysctl_file])
+        except OSError as e:
+            # EACCES is tolerated: we may be inside an unprivileged
+            # container where sysctl writes are forbidden.
+            if e.errno == errno.EACCES:
+                hookenv.log("Got Permission Denied trying to set the "
+                            "sysctl settings at {}. We may be in an LXC. "
+                            "Exiting gracefully".format(cassandra_sysctl_file),
+                            WARNING)
+            else:
+                raise
+
+
+@action
+def reset_limits():
+    '''Set /etc/security/limits.d correctly for Ubuntu, so the
+    startup scripts don't emit a spurious warning.
+
+    Per Cassandra documentation, Ubuntu needs some extra
+    twiddling in /etc/security/limits.d. I have no idea why
+    the packages don't do this, since they are already
+    setting limits for the cassandra user correctly. The real
+    bug is that the limits of the user running the startup script
+    are being checked, rather than the limits of the user that will
+    actually run the process.
+    '''
+    # Raise limits for root and the default 'ubuntu' user, since either
+    # may end up running the startup script (see docstring above).
+    contents = dedent('''\
+                      # Maintained by Juju
+                      root - memlock unlimited
+                      root - nofile 100000
+                      root - nproc 32768
+                      root - as unlimited
+                      ubuntu - memlock unlimited
+                      ubuntu - nofile 100000
+                      ubuntu - nproc 32768
+                      ubuntu - as unlimited
+                      ''')
+    host.write_file('/etc/security/limits.d/cassandra-charm.conf',
+                    contents.encode('US-ASCII'))
+
+
+@action
+def install_cassandra_packages():
+    '''Install the Cassandra packages, and select OpenJDK 8 as the
+    system JRE unless the Oracle JRE was requested.'''
+    helpers.install_packages(helpers.get_cassandra_packages())
+    if helpers.get_jre() != 'oracle':
+        subprocess.check_call(['update-java-alternatives',
+                               '--jre-headless',
+                               '--set', 'java-1.8.0-openjdk-amd64'])
+
+
+@action
+def ensure_cassandra_package_status():
+    '''Ensure the dpkg status of the Cassandra packages matches the
+    package_status config (e.g. "install" or "hold").'''
+    helpers.ensure_package_status(helpers.get_cassandra_packages())
+
+
+def _fetch_oracle_jre():
+    '''Return the path of the newest local Oracle Server JRE tarball.
+
+    If private_jre_url is set and not yet retrieved, download it into
+    the charm's lib/ directory first. Blocks the unit and exits if no
+    valid tarball can be found.
+    '''
+    config = hookenv.config()
+    url = config.get('private_jre_url', None)
+    if url and config.get('retrieved_jre', None) != url:
+        filename = os.path.join(hookenv.charm_dir(),
+                                'lib', url.split('/')[-1])
+        if not filename.endswith('-linux-x64.tar.gz'):
+            helpers.status_set('blocked',
+                               'Invalid private_jre_url {}'.format(url))
+            raise SystemExit(0)
+        helpers.status_set(hookenv.status_get()[0],
+                           'Downloading Oracle JRE')
+        hookenv.log('Oracle JRE URL is {}'.format(url))
+        urllib.request.urlretrieve(url, filename)
+        # Remember the URL so we do not re-download on the next hook.
+        config['retrieved_jre'] = url
+
+    pattern = os.path.join(hookenv.charm_dir(),
+                           'lib', 'server-jre-?u*-linux-x64.tar.gz')
+    tarballs = glob.glob(pattern)
+    if not (url or tarballs):
+        helpers.status_set('blocked',
+                           'private_jre_url not set and no local tarballs.')
+        raise SystemExit(0)
+
+    elif not tarballs:
+        helpers.status_set('blocked',
+                           'Oracle JRE tarball not found ({})'.format(pattern))
+        raise SystemExit(0)
+
+    # Latest tarball by filename/version num. Let's hope they don't hit
+    # 99 (currently at 76).
+    tarball = sorted(tarballs)[-1]
+    return tarball
+
+
+def _install_oracle_jre_tarball(tarball):
+    '''Unpack the Oracle JRE tarball and register it with
+    update-alternatives, skipping the unpack if this exact tarball is
+    already installed.'''
+    # Same directory as webupd8 to avoid surprising people, but it could
+    # be anything.
+    if 'jre-7u' in str(tarball):
+        dest = '/usr/lib/jvm/java-7-oracle'
+    else:
+        dest = '/usr/lib/jvm/java-8-oracle'
+
+    if not os.path.isdir(dest):
+        host.mkdir(dest)
+
+    jre_exists = os.path.exists(os.path.join(dest, 'bin', 'java'))
+
+    config = hookenv.config()
+
+    # Unpack the latest tarball if necessary.
+    if config.get('oracle_jre_tarball', '') == tarball and jre_exists:
+        hookenv.log('Already installed {}'.format(tarball))
+    else:
+        hookenv.log('Unpacking {}'.format(tarball))
+        subprocess.check_call(['tar', '-xz', '-C', dest,
+                               '--strip-components=1', '-f', tarball])
+        config['oracle_jre_tarball'] = tarball
+
+    # Set alternatives, so /usr/bin/java does what we want.
+    for tool in ['java', 'javac']:
+        tool_path = os.path.join(dest, 'bin', tool)
+        subprocess.check_call(['update-alternatives', '--install',
+                               os.path.join('/usr/bin', tool),
+                               tool, tool_path, '1'])
+        subprocess.check_call(['update-alternatives',
+                               '--set', tool, tool_path])
+
+
+@action
+def install_oracle_jre():
+    '''Fetch and install the Oracle JRE, if configured via the jre
+    option; a no-op otherwise.'''
+    if helpers.get_jre() != 'oracle':
+        return
+
+    tarball = _fetch_oracle_jre()
+    _install_oracle_jre_tarball(tarball)
+
+
+@action
+def emit_java_version():
+ # Log the version for posterity. Could be useful since Oracle JRE
+ # security updates are not automated.
+ version = subprocess.check_output(['java', '-version'],
+ universal_newlines=True)
+ for line in version.splitlines():
+ hookenv.log('JRE: {}'.format(line))
+
+
+@action
+def emit_meminfo():
+    '''Log system memory usage as reported by free(1).'''
+    helpers.emit(subprocess.check_output(['free', '--human'],
+                                         universal_newlines=True))
+
+
+@action
+def configure_cassandra_yaml():
+    '''Regenerate cassandra.yaml (delegates to helpers).'''
+    helpers.configure_cassandra_yaml()
+
+
+@action
+def configure_cassandra_env():
+    '''Rewrite cassandra-env.sh, applying heap overrides from config.
+
+    A config key with an empty value re-comments the corresponding
+    variable so the packaged defaults apply again.
+    '''
+    cassandra_env_path = helpers.get_cassandra_env_file()
+    assert os.path.exists(cassandra_env_path)
+
+    helpers.maybe_backup(cassandra_env_path)
+
+    # (config key, regexp matching the possibly-commented assignment)
+    overrides = [
+        ('max_heap_size', re.compile(r'^#?(MAX_HEAP_SIZE)=(.*)$', re.M)),
+        ('heap_newsize', re.compile(r'^#?(HEAP_NEWSIZE)=(.*)$', re.M)),
+        # We don't allow this to be overridden to ensure that tools
+        # will find JMX using the default port.
+        # ('jmx_port', re.compile(r'^#?(JMX_PORT)=(.*)$', re.M)),
+    ]
+
+    with open(cassandra_env_path, 'r') as f:
+        env = f.read()
+
+    config = hookenv.config()
+    for key, regexp in overrides:
+        if config[key]:
+            val = shlex.quote(str(config[key]))
+            env = regexp.sub(r'\g<1>={}'.format(val),
+                             env)
+        else:
+            env = regexp.sub(r'#\1=\2', env)
+    host.write_file(cassandra_env_path, env.encode('UTF-8'))
+
+
+@action
+def configure_cassandra_rackdc():
+    '''Write cassandra-rackdc.properties from the datacenter and rack
+    config options; rack defaults to the Juju service name.'''
+    config = hookenv.config()
+    datacenter = config['datacenter'].strip()
+    rack = config['rack'].strip() or hookenv.service_name()
+    rackdc_properties = dedent('''\
+                               dc={}
+                               rack={}
+                               ''').format(datacenter, rack)
+    rackdc_path = helpers.get_cassandra_rackdc_file()
+    host.write_file(rackdc_path, rackdc_properties.encode('UTF-8'))
+
+
+def needs_reset_auth_keyspace_replication():
+    '''Guard for reset_auth_keyspace_replication.'''
+    num_nodes = helpers.num_nodes()
+    datacenter = hookenv.config()['datacenter']
+    with helpers.connect() as session:
+        strategy_opts = helpers.get_auth_keyspace_replication(session)
+        # -1 when our datacenter has no entry, guaranteeing a mismatch
+        # (and thus a reset) on first run.
+        rf = int(strategy_opts.get(datacenter, -1))
+        hookenv.log('system_auth rf={!r}'.format(strategy_opts))
+        # If the node count has changed, we should change the rf.
+        return rf != num_nodes
+
+
+@leader_only
+@action
+@authentication
+@coordinator.require('repair', needs_reset_auth_keyspace_replication)
+def reset_auth_keyspace_replication():
+    # Cassandra requires you to manually set the replication factor of
+    # the system_auth keyspace, to ensure availability and redundancy.
+    # The recommendation is to set the replication factor so that every
+    # node has a copy.
+    num_nodes = helpers.num_nodes()
+    datacenter = hookenv.config()['datacenter']
+    with helpers.connect() as session:
+        strategy_opts = helpers.get_auth_keyspace_replication(session)
+        rf = int(strategy_opts.get(datacenter, -1))
+        hookenv.log('system_auth rf={!r}'.format(strategy_opts))
+        if rf != num_nodes:
+            strategy_opts['class'] = 'NetworkTopologyStrategy'
+            strategy_opts[datacenter] = num_nodes
+            # 'replication_factor' belongs to SimpleStrategy; drop it
+            # when switching to per-datacenter NetworkTopologyStrategy.
+            if 'replication_factor' in strategy_opts:
+                del strategy_opts['replication_factor']
+            helpers.set_auth_keyspace_replication(session, strategy_opts)
+            if rf < num_nodes:
+                # Increasing rf, need to run repair.
+                helpers.repair_auth_keyspace()
+    helpers.set_active()
+
+
+@action
+def store_unit_private_ip():
+    '''Store the unit's private ip address, so we can tell if it changes.'''
+    # Kept in config so config.changed('unit_private_ip') works in
+    # needs_restart().
+    hookenv.config()['unit_private_ip'] = hookenv.unit_private_ip()
+
+
+def needs_restart():
+ '''Return True if Cassandra is not running or needs to be restarted.'''
+ if helpers.is_decommissioned():
+ # Decommissioned nodes are never restarted. They remain up
+ # telling everyone they are decommissioned.
+ helpers.status_set('blocked', 'Decommissioned node')
+ return False
+
+ if not helpers.is_cassandra_running():
+ if helpers.is_bootstrapped():
+ helpers.status_set('waiting', 'Waiting for permission to start')
+ else:
+ helpers.status_set('waiting',
+ 'Waiting for permission to bootstrap')
+ return True
+
+ config = hookenv.config()
+
+ # If our IP address has changed, we need to restart.
+ if config.changed('unit_private_ip'):
+ helpers.status_set('waiting', 'IP address changed. '
+ 'Waiting for restart permission.')
+ return True
+
+ # If the directory paths have changed, we need to migrate data
+ # during a restart.
+ storage = relations.StorageRelation()
+ if storage.needs_remount():
+ helpers.status_set(hookenv.status_get()[0],
+ 'New mounts. Waiting for restart permission')
+ return True
+
+ # If any of these config items changed, a restart is required.
+ for key in RESTART_REQUIRED_KEYS:
+ if config.changed(key):
+ hookenv.log('{} changed. Restart required.'.format(key))
+ for key in RESTART_REQUIRED_KEYS:
+ if config.changed(key):
+ helpers.status_set(hookenv.status_get()[0],
+ 'Config changes. '
+ 'Waiting for restart permission.')
+ return True
+
+ # If we have new seeds, we should restart.
+ new_seeds = helpers.get_seed_ips()
+ if config.get('configured_seeds') != sorted(new_seeds):
+ old_seeds = set(config.previous('configured_seeds') or [])
+ changed = old_seeds.symmetric_difference(new_seeds)
+ # We don't care about the local node in the changes.
+ changed.discard(hookenv.unit_private_ip())
+ if changed:
+ helpers.status_set(hookenv.status_get()[0],
+ 'Updated seeds {!r}. '
+ 'Waiting for restart permission.'
+ ''.format(new_seeds))
+ return True
+
+ hookenv.log('Restart not required')
+ return False
+
+
+@action
+@coordinator.require('restart', needs_restart)
+def maybe_restart():
+    '''Restart sequence.
+
+    If a restart is needed, shutdown Cassandra, perform all pending operations
+    that cannot be done while Cassandra is live, and restart it.
+    '''
+    helpers.status_set('maintenance', 'Stopping Cassandra')
+    helpers.stop_cassandra()
+    # Remount/migrate storage and create directories while the service
+    # is down.
+    helpers.remount_cassandra()
+    helpers.ensure_database_directories()
+    if helpers.peer_relid() and not helpers.is_bootstrapped():
+        helpers.status_set('maintenance', 'Bootstrapping')
+    else:
+        helpers.status_set('maintenance', 'Starting Cassandra')
+    helpers.start_cassandra()
+
+
+@action
+def post_bootstrap():
+    '''Maintain state on if the node has bootstrapped into the cluster.
+
+    Per documented procedure for adding new units to a cluster, wait 2
+    minutes if the unit has just bootstrapped to ensure other units
+    do not attempt bootstrap too soon. Also, wait until completed joining
+    to ensure we keep the lock and ensure other nodes don't restart or
+    bootstrap.
+    '''
+    if not helpers.is_bootstrapped():
+        if coordinator.relid is not None:
+            helpers.status_set('maintenance', 'Post-bootstrap 2 minute delay')
+            hookenv.log('Post-bootstrap 2 minute delay')
+            time.sleep(120)  # Must wait 2 minutes between bootstrapping nodes.
+
+        # Poll until the node reaches NORMAL; block the unit on any
+        # other terminal state.
+        join_msg_set = False
+        while True:
+            status = helpers.get_node_status()
+            if status == 'NORMAL':
+                break
+            elif status == 'JOINING':
+                if not join_msg_set:
+                    helpers.status_set('maintenance', 'Still joining cluster')
+                    join_msg_set = True
+                time.sleep(10)
+                continue
+            else:
+                if status is None:
+                    helpers.status_set('blocked',
+                                       'Unexpectedly shutdown during '
+                                       'bootstrap')
+                else:
+                    helpers.status_set('blocked',
+                                       'Failed to bootstrap ({})'
+                                       ''.format(status))
+                raise SystemExit(0)
+
+    # Unconditionally call this to publish the bootstrapped flag to
+    # the peer relation, as the first unit was bootstrapped before
+    # the peer relation existed.
+    helpers.set_bootstrapped()
+
+
+@action
+def stop_cassandra():
+    '''Stop the Cassandra service (delegates to helpers).'''
+    helpers.stop_cassandra()
+
+
+@action
+def start_cassandra():
+    '''Start the Cassandra service (delegates to helpers).'''
+    helpers.start_cassandra()
+
+
+@leader_only
+@action
+@authentication
+def create_unit_superusers():
+    '''Create superuser accounts requested by peer units (leader only).'''
+    # The leader creates and updates accounts for nodes, using the
+    # encrypted password they provide in relations.PeerRelation. We
+    # don't end up with unencrypted passwords leaving the unit, and we
+    # don't need to restart Cassandra in no-auth mode which is slow and
+    # I worry may cause issues interrupting the bootstrap.
+    if not coordinator.relid:
+        return  # No peer relation, no requests yet.
+
+    created_units = helpers.get_unit_superusers()
+    uncreated_units = [u for u in hookenv.related_units(coordinator.relid)
+                       if u not in created_units]
+    for peer in uncreated_units:
+        rel = hookenv.relation_get(unit=peer, rid=coordinator.relid)
+        username = rel.get('username')
+        pwhash = rel.get('pwhash')
+        if not username:
+            # Peer has not published credentials yet.
+            continue
+        hookenv.log('Creating {} account for {}'.format(username, peer))
+        with helpers.connect() as session:
+            helpers.ensure_user(session, username, pwhash, superuser=True)
+        created_units.add(peer)
+    helpers.set_unit_superusers(created_units)
+
+
+@action
+def reset_all_io_schedulers():
+ dirs = helpers.get_all_database_directories()
+ dirs = (dirs['data_file_directories'] + [dirs['commitlog_directory']] +
+ [dirs['saved_caches_directory']])
+ config = hookenv.config()
+ for d in dirs:
+ if os.path.isdir(d): # Directory may not exist yet.
+ helpers.set_io_scheduler(config['io_scheduler'], d)
+
+
+def _client_credentials(relid):
+ '''Return the client credentials used by relation relid.'''
+ relinfo = hookenv.relation_get(unit=hookenv.local_unit(), rid=relid)
+ username = relinfo.get('username')
+ password = relinfo.get('password')
+ if username is None or password is None:
+ for unit in hookenv.related_units(coordinator.relid):
+ try:
+ relinfo = hookenv.relation_get(unit=unit, rid=relid)
+ username = relinfo.get('username')
+ password = relinfo.get('password')
+ if username is not None and password is not None:
+ return username, password
+ except subprocess.CalledProcessError:
+ pass # Assume the remote unit has not joined yet.
+ return None, None
+ else:
+ return username, password
+
+
+def _publish_database_relation(relid, superuser):
+    # The Cassandra service needs to provide a common set of credentials
+ # to a client unit. The leader creates these, if none of the other
+ # units are found to have published them already (a previously elected
+ # leader may have done this). The leader then tickles the other units,
+ # firing a hook and giving them the opportunity to copy and publish
+ # these credentials.
+ username, password = _client_credentials(relid)
+ if username is None:
+ if hookenv.is_leader():
+ # Credentials not set. The leader must generate them. We use
+ # the service name so that database permissions remain valid
+ # even after the relation is dropped and recreated, or the
+            # juju environment is rebuilt and the database restored from
+ # backups.
+ service_name = helpers.get_service_name(relid)
+ if not service_name:
+ # Per Bug #1555261, we might not yet have related units,
+ # so no way to calculate the remote service name and thus
+ # the user.
+ return # Try again later.
+ username = 'juju_{}'.format(helpers.get_service_name(relid))
+ if superuser:
+ username += '_admin'
+ password = host.pwgen()
+ pwhash = helpers.encrypt_password(password)
+ with helpers.connect() as session:
+ helpers.ensure_user(session, username, pwhash, superuser)
+ # Wake the peers, if any.
+ helpers.leader_ping()
+ else:
+ return # No credentials yet. Nothing to do.
+
+ # Publish the information the client needs on the relation where
+ # they can find it.
+ # - authentication credentials
+ # - address and port
+ # - cluster_name, so clients can differentiate multiple clusters
+ # - datacenter + rack, so clients know what names they can use
+ # when altering keyspace replication settings.
+ config = hookenv.config()
+ hookenv.relation_set(relid,
+ username=username, password=password,
+ host=hookenv.unit_public_ip(),
+ native_transport_port=config['native_transport_port'],
+ rpc_port=config['rpc_port'],
+ cluster_name=config['cluster_name'],
+ datacenter=config['datacenter'],
+ rack=config['rack'])
+
+
+@action
+def publish_database_relations():
+ for relid in hookenv.relation_ids('database'):
+ _publish_database_relation(relid, superuser=False)
+
+
+@action
+def publish_database_admin_relations():
+ for relid in hookenv.relation_ids('database-admin'):
+ _publish_database_relation(relid, superuser=True)
+
+
+@action
+def install_maintenance_crontab():
+ # Every unit should run repair once per week (at least once per
+ # GCGraceSeconds, which defaults to 10 days but can be changed per
+    # keyspace). Distribute the repair time evenly over the week.
+ unit_num = int(hookenv.local_unit().split('/')[-1])
+ dow, hour, minute = helpers.week_spread(unit_num)
+ contents = jinja.render('cassandra_maintenance_cron.tmpl', vars())
+ cron_path = "/etc/cron.d/cassandra-maintenance"
+ host.write_file(cron_path, contents.encode('US-ASCII'))
+
+
+@action
+def emit_cluster_info():
+ helpers.emit_describe_cluster()
+ helpers.emit_status()
+ helpers.emit_netstats()
+
+
+@action
+def configure_firewall():
+ '''Configure firewall rules using ufw.
+
+ This is primarily to block access to the replication and JMX ports,
+ as juju's default port access controls are not strict enough and
+ allow access to the entire environment.
+ '''
+ config = hookenv.config()
+ ufw.enable(soft_fail=True)
+
+ # Enable SSH from anywhere, relying on Juju and external firewalls
+ # to control access.
+ ufw.service('ssh', 'open')
+ ufw.service('nrpe', 'open') # Also NRPE for nagios checks.
+ ufw.service('rsync', 'open') # Also rsync for data transfer and backups.
+
+ # Clients need client access. These protocols are configured to
+ # require authentication.
+ client_keys = ['native_transport_port', 'rpc_port']
+ client_ports = [config[key] for key in client_keys]
+
+    # Peers need replication access. These protocols do not
+ # require authentication, so firewall it from other nodes.
+ peer_ports = [config['storage_port'], config['ssl_storage_port']]
+
+ # Enable client access from anywhere. Juju and external firewalls
+ # can still restrict this further of course (ie. 'juju expose').
+ for key in client_keys:
+ if config.changed(key) and config.previous(key) is not None:
+ # First close old ports. We use this order in the unlikely case
+ # someone is trying to swap the native and Thrift ports.
+ ufw.service(config.previous(key), 'close')
+ for port in client_ports:
+ # Then open or close the configured ports.
+ ufw.service(port, 'open')
+
+ desired_rules = set() # ufw.grant_access/remove_access commands.
+
+ # Rules for peers
+ for relinfo in hookenv.relations_of_type('cluster'):
+ if relinfo['private-address']:
+ pa = hookenv._ensure_ip(relinfo['private-address'])
+ for port in peer_ports:
+ desired_rules.add((pa, 'any', port))
+ # Rules for admin connections. We allow database-admin relations access
+ # to the cluster communication ports so that tools like sstableloader
+ # can run.
+ for relinfo in hookenv.relations_of_type('database-admin'):
+ if relinfo['private-address']:
+ pa = hookenv._ensure_ip(relinfo['private-address'])
+ for port in peer_ports:
+ desired_rules.add((pa, 'any', port))
+
+ previous_rules = set(tuple(rule) for rule in config.get('ufw_rules', []))
+
+ # Close any rules previously opened that are no longer desired.
+ for rule in sorted(list(previous_rules - desired_rules)):
+ ufw.revoke_access(*rule)
+
+ # Open all the desired rules.
+ for rule in sorted(list(desired_rules)):
+ ufw.grant_access(*rule)
+
+    # Store our rules for next time. Note that this is inherently racy -
+ # this value is only persisted if the hook exits cleanly. If the
+ # hook fails, then someone changes port configuration or IP
+ # addresses change, then the failed hook retried, we can lose track
+ # of previously granted rules and they will never be revoked. It is
+ # impossible to remove this race entirely, so we stick with this
+ # simple approach.
+ config['ufw_rules'] = list(desired_rules) # A list because JSON.
+
+
+@action
+def nrpe_external_master_relation():
+ ''' Configure the nrpe-external-master relation '''
+ local_plugins = helpers.local_plugins_dir()
+ if os.path.exists(local_plugins):
+ src = os.path.join(hookenv.charm_dir(),
+ "files", "check_cassandra_heap.sh")
+ with open(src, 'rb') as f:
+ host.write_file(os.path.join(local_plugins,
+ 'check_cassandra_heap.sh'),
+ f.read(), perms=0o555)
+
+ nrpe_compat = nrpe.NRPE()
+ conf = hookenv.config()
+
+ cassandra_heap_warn = conf.get('nagios_heapchk_warn_pct')
+ cassandra_heap_crit = conf.get('nagios_heapchk_crit_pct')
+ if cassandra_heap_warn and cassandra_heap_crit:
+ nrpe_compat.add_check(
+ shortname="cassandra_heap",
+ description="Check Cassandra Heap",
+ check_cmd="check_cassandra_heap.sh localhost {} {}"
+ "".format(cassandra_heap_warn, cassandra_heap_crit))
+
+ cassandra_disk_warn = conf.get('nagios_disk_warn_pct')
+ cassandra_disk_crit = conf.get('nagios_disk_crit_pct')
+ dirs = helpers.get_all_database_directories()
+ dirs = set(dirs['data_file_directories'] +
+ [dirs['commitlog_directory'], dirs['saved_caches_directory']])
+ # We need to check the space on the mountpoint, not on the actual
+ # directory, as the nagios user won't have access to the actual directory.
+ mounts = set(helpers.mountpoint(d) for d in dirs)
+ for disk in mounts:
+ check_name = re.sub('[^A-Za-z0-9_]', '_', disk)
+ if cassandra_disk_warn and cassandra_disk_crit:
+ shortname = "cassandra_disk{}".format(check_name)
+ hookenv.log("Adding disk utilization check {}".format(shortname),
+ DEBUG)
+ nrpe_compat.add_check(
+ shortname=shortname,
+ description="Check Cassandra Disk {}".format(disk),
+ check_cmd="check_disk -u GB -w {}% -c {}% -K 5% -p {}"
+ "".format(cassandra_disk_warn, cassandra_disk_crit,
+ disk))
+ nrpe_compat.write()
+
+
+@leader_only
+@action
+def maintain_seeds():
+ '''The leader needs to maintain the list of seed nodes'''
+ seed_ips = helpers.get_seed_ips()
+ hookenv.log('Current seeds == {!r}'.format(seed_ips), DEBUG)
+
+ bootstrapped_ips = helpers.get_bootstrapped_ips()
+ hookenv.log('Bootstrapped == {!r}'.format(bootstrapped_ips), DEBUG)
+
+ # Remove any seeds that are no longer bootstrapped, such as dropped
+ # units.
+ seed_ips.intersection_update(bootstrapped_ips)
+
+ # Add more bootstrapped nodes, if necessary, to get to our maximum
+ # of 3 seeds.
+ potential_seed_ips = list(reversed(sorted(bootstrapped_ips)))
+ while len(seed_ips) < 3 and potential_seed_ips:
+ seed_ips.add(potential_seed_ips.pop())
+
+ # If there are no seeds or bootstrapped nodes, start with the leader. Us.
+ if len(seed_ips) == 0:
+ seed_ips.add(hookenv.unit_private_ip())
+
+ hookenv.log('Updated seeds == {!r}'.format(seed_ips), DEBUG)
+
+ hookenv.leader_set(seeds=','.join(sorted(seed_ips)))
+
+
+@leader_only
+@action
+@authentication
+def reset_default_password():
+ if hookenv.leader_get('default_admin_password_changed'):
+ hookenv.log('Default admin password already changed')
+ return
+
+ # Cassandra ships with well known credentials, rather than
+ # providing a tool to reset credentials. This is a huge security
+ # hole we must close.
+ try:
+ # We need a big timeout here, as the cassandra user actually
+ # springs into existence some time after Cassandra has started
+ # up and is accepting connections.
+ with helpers.connect('cassandra', 'cassandra',
+ timeout=120, auth_timeout=120) as session:
+ # But before we close this security hole, we need to use these
+ # credentials to create a different admin account for the
+ # leader, allowing it to create accounts for other nodes as they
+ # join. The alternative is restarting Cassandra without
+ # authentication, which this charm will likely need to do in the
+ # future when we allow Cassandra services to be related together.
+ helpers.status_set('maintenance',
+ 'Creating initial superuser account')
+ username, password = helpers.superuser_credentials()
+ pwhash = helpers.encrypt_password(password)
+ helpers.ensure_user(session, username, pwhash, superuser=True)
+ helpers.set_unit_superusers([hookenv.local_unit()])
+
+ helpers.status_set('maintenance',
+ 'Changing default admin password')
+ helpers.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
+ cassandra.ConsistencyLevel.ALL, (host.pwgen(),))
+ except cassandra.AuthenticationFailed:
+ hookenv.log('Default superuser account already reset')
+ try:
+ with helpers.connect():
+ hookenv.log("Leader's superuser account already created")
+ except cassandra.AuthenticationFailed:
+ # We have no known superuser credentials. Create the account
+ # the hard, slow way. This will be the normal method
+ # of creating the service's initial account when we allow
+ # services to be related together.
+ helpers.create_unit_superuser_hard()
+
+ hookenv.leader_set(default_admin_password_changed=True)
+
+
+@action
+def set_active():
+ # If we got this far, the unit is active. Update the status if it is
+ # not already active. We don't do this unconditionally, as the charm
+ # may be active but doing stuff, like active but waiting for restart
+ # permission.
+ if hookenv.status_get()[0] != 'active':
+ helpers.set_active()
+ else:
+ hookenv.log('Unit status already active', DEBUG)
+
+
+@action
+@authentication
+def request_unit_superuser():
+ relid = helpers.peer_relid()
+ if relid is None:
+ hookenv.log('Request deferred until peer relation exists')
+ return
+
+ relinfo = hookenv.relation_get(unit=hookenv.local_unit(),
+ rid=relid)
+ if relinfo and relinfo.get('username'):
+ # We must avoid blindly setting the pwhash on the relation,
+        # as we will likely get a different value every time we
+ # encrypt the password due to the random salt.
+ hookenv.log('Superuser account request previously made')
+ else:
+ # Publish the requested superuser and hash to our peers.
+ username, password = helpers.superuser_credentials()
+ pwhash = helpers.encrypt_password(password)
+ hookenv.relation_set(relid, username=username, pwhash=pwhash)
+ hookenv.log('Requested superuser account creation')
+
+
+@action
+def update_etc_hosts():
+ hostname = socket.gethostname()
+ addr = hookenv.unit_private_ip()
+ hosts_map = {addr: hostname}
+ # only need to add myself to /etc/hosts
+ helpers.update_hosts_file('/etc/hosts', hosts_map)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/__init__.py
new file mode 100644
index 0000000..f72e7f8
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+ import six # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+ import six # flake8: noqa
+
+try:
+ import yaml # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+ import yaml # flake8: noqa
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py
new file mode 100644
index 0000000..1d039ea
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py
@@ -0,0 +1,126 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import subprocess
+import time
+import os
+from distutils.spawn import find_executable
+
+from charmhelpers.core.hookenv import (
+ in_relation_hook,
+ relation_ids,
+ relation_set,
+ relation_get,
+)
+
+
+def action_set(key, val):
+ if find_executable('action-set'):
+ action_cmd = ['action-set']
+
+ if isinstance(val, dict):
+ for k, v in iter(val.items()):
+ action_set('%s.%s' % (key, k), v)
+ return True
+
+ action_cmd.append('%s=%s' % (key, val))
+ subprocess.check_call(action_cmd)
+ return True
+ return False
+
+
+class Benchmark():
+ """
+ Helper class for the `benchmark` interface.
+
+ :param list actions: Define the actions that are also benchmarks
+
+ From inside the benchmark-relation-changed hook, you would
+ Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
+
+ Examples:
+
+ siege = Benchmark(['siege'])
+ siege.start()
+ [... run siege ...]
+ # The higher the score, the better the benchmark
+ siege.set_composite_score(16.70, 'trans/sec', 'desc')
+ siege.finish()
+
+
+ """
+
+ BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
+
+ required_keys = [
+ 'hostname',
+ 'port',
+ 'graphite_port',
+ 'graphite_endpoint',
+ 'api_port'
+ ]
+
+ def __init__(self, benchmarks=None):
+ if in_relation_hook():
+ if benchmarks is not None:
+ for rid in sorted(relation_ids('benchmark')):
+ relation_set(relation_id=rid, relation_settings={
+ 'benchmarks': ",".join(benchmarks)
+ })
+
+ # Check the relation data
+ config = {}
+ for key in self.required_keys:
+ val = relation_get(key)
+ if val is not None:
+ config[key] = val
+ else:
+ # We don't have all of the required keys
+ config = {}
+ break
+
+ if len(config):
+ with open(self.BENCHMARK_CONF, 'w') as f:
+ for key, val in iter(config.items()):
+ f.write("%s=%s\n" % (key, val))
+
+ @staticmethod
+ def start():
+ action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
+
+ """
+ If the collectd charm is also installed, tell it to send a snapshot
+ of the current profile data.
+ """
+ COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
+ if os.path.exists(COLLECT_PROFILE_DATA):
+ subprocess.check_output([COLLECT_PROFILE_DATA])
+
+ @staticmethod
+ def finish():
+ action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
+
+ @staticmethod
+ def set_composite_score(value, units, direction='asc'):
+ """
+ Set the composite score for a benchmark run. This is a single number
+ representative of the benchmark results. This could be the most
+ important metric, or an amalgamation of metric scores.
+ """
+ return action_set(
+ "meta.composite",
+ {'value': value, 'units': units, 'direction': direction}
+ )
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py
new file mode 100644
index 0000000..2f24642
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -0,0 +1,398 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Compatibility with the nrpe-external-master charm"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import subprocess
+import pwd
+import grp
+import os
+import glob
+import shutil
+import re
+import shlex
+import yaml
+
+from charmhelpers.core.hookenv import (
+ config,
+ local_unit,
+ log,
+ relation_ids,
+ relation_set,
+ relations_of_type,
+)
+
+from charmhelpers.core.host import service
+
+# This module adds compatibility with the nrpe-external-master and plain nrpe
+# subordinate charms. To use it in your charm:
+#
+# 1. Update metadata.yaml
+#
+# provides:
+# (...)
+# nrpe-external-master:
+# interface: nrpe-external-master
+# scope: container
+#
+# and/or
+#
+# provides:
+# (...)
+# local-monitors:
+# interface: local-monitors
+# scope: container
+
+#
+# 2. Add the following to config.yaml
+#
+# nagios_context:
+# default: "juju"
+# type: string
+# description: |
+# Used by the nrpe subordinate charms.
+# A string that will be prepended to instance name to set the host name
+# in nagios. So for instance the hostname would be something like:
+# juju-myservice-0
+# If you're running multiple environments with the same services in them
+# this allows you to differentiate between them.
+# nagios_servicegroups:
+# default: ""
+# type: string
+# description: |
+# A comma-separated list of nagios servicegroups.
+# If left empty, the nagios_context will be used as the servicegroup
+#
+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
+#
+# 4. Update your hooks.py with something like this:
+#
+# from charmsupport.nrpe import NRPE
+# (...)
+# def update_nrpe_config():
+# nrpe_compat = NRPE()
+# nrpe_compat.add_check(
+# shortname = "myservice",
+# description = "Check MyService",
+# check_cmd = "check_http -w 2 -c 10 http://localhost"
+# )
+# nrpe_compat.add_check(
+# "myservice_other",
+# "Check for widget failures",
+# check_cmd = "/srv/myapp/scripts/widget_check"
+# )
+# nrpe_compat.write()
+#
+# def config_changed():
+# (...)
+# update_nrpe_config()
+#
+# def nrpe_external_master_relation_changed():
+# update_nrpe_config()
+#
+# def local_monitors_relation_changed():
+# update_nrpe_config()
+#
+# 5. ln -s hooks.py nrpe-external-master-relation-changed
+# ln -s hooks.py local-monitors-relation-changed
+
+
+class CheckException(Exception):
+ pass
+
+
+class Check(object):
+ shortname_re = '[A-Za-z0-9-_]+$'
+ service_template = ("""
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {{
+ use active-service
+ host_name {nagios_hostname}
+ service_description {nagios_hostname}[{shortname}] """
+ """{description}
+ check_command check_nrpe!{command}
+ servicegroups {nagios_servicegroup}
+}}
+""")
+
+ def __init__(self, shortname, description, check_cmd):
+ super(Check, self).__init__()
+ # XXX: could be better to calculate this from the service name
+ if not re.match(self.shortname_re, shortname):
+ raise CheckException("shortname must match {}".format(
+ Check.shortname_re))
+ self.shortname = shortname
+ self.command = "check_{}".format(shortname)
+ # Note: a set of invalid characters is defined by the
+ # Nagios server config
+ # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
+ self.description = description
+ self.check_cmd = self._locate_cmd(check_cmd)
+
+ def _get_check_filename(self):
+ return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+ def _get_service_filename(self, hostname):
+ return os.path.join(NRPE.nagios_exportdir,
+ 'service__{}_{}.cfg'.format(hostname, self.command))
+
+ def _locate_cmd(self, check_cmd):
+ search_path = (
+ '/usr/lib/nagios/plugins',
+ '/usr/local/lib/nagios/plugins',
+ )
+ parts = shlex.split(check_cmd)
+ for path in search_path:
+ if os.path.exists(os.path.join(path, parts[0])):
+ command = os.path.join(path, parts[0])
+ if len(parts) > 1:
+ command += " " + " ".join(parts[1:])
+ return command
+ log('Check command not found: {}'.format(parts[0]))
+ return ''
+
+ def _remove_service_files(self):
+ if not os.path.exists(NRPE.nagios_exportdir):
+ return
+ for f in os.listdir(NRPE.nagios_exportdir):
+ if f.endswith('_{}.cfg'.format(self.command)):
+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+ def remove(self, hostname):
+ nrpe_check_file = self._get_check_filename()
+ if os.path.exists(nrpe_check_file):
+ os.remove(nrpe_check_file)
+ self._remove_service_files()
+
+ def write(self, nagios_context, hostname, nagios_servicegroups):
+ nrpe_check_file = self._get_check_filename()
+ with open(nrpe_check_file, 'w') as nrpe_check_config:
+ nrpe_check_config.write("# check {}\n".format(self.shortname))
+ nrpe_check_config.write("command[{}]={}\n".format(
+ self.command, self.check_cmd))
+
+ if not os.path.exists(NRPE.nagios_exportdir):
+ log('Not writing service config as {} is not accessible'.format(
+ NRPE.nagios_exportdir))
+ else:
+ self.write_service_config(nagios_context, hostname,
+ nagios_servicegroups)
+
+ def write_service_config(self, nagios_context, hostname,
+ nagios_servicegroups):
+ self._remove_service_files()
+
+ templ_vars = {
+ 'nagios_hostname': hostname,
+ 'nagios_servicegroup': nagios_servicegroups,
+ 'description': self.description,
+ 'shortname': self.shortname,
+ 'command': self.command,
+ }
+ nrpe_service_text = Check.service_template.format(**templ_vars)
+ nrpe_service_file = self._get_service_filename(hostname)
+ with open(nrpe_service_file, 'w') as nrpe_service_config:
+ nrpe_service_config.write(str(nrpe_service_text))
+
+ def run(self):
+ subprocess.call(self.check_cmd)
+
+
+class NRPE(object):
+ nagios_logdir = '/var/log/nagios'
+ nagios_exportdir = '/var/lib/nagios/export'
+ nrpe_confdir = '/etc/nagios/nrpe.d'
+
+ def __init__(self, hostname=None):
+ super(NRPE, self).__init__()
+ self.config = config()
+ self.nagios_context = self.config['nagios_context']
+ if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
+ self.nagios_servicegroups = self.config['nagios_servicegroups']
+ else:
+ self.nagios_servicegroups = self.nagios_context
+ self.unit_name = local_unit().replace('/', '-')
+ if hostname:
+ self.hostname = hostname
+ else:
+ nagios_hostname = get_nagios_hostname()
+ if nagios_hostname:
+ self.hostname = nagios_hostname
+ else:
+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
+ self.checks = []
+
+ def add_check(self, *args, **kwargs):
+ self.checks.append(Check(*args, **kwargs))
+
+ def remove_check(self, *args, **kwargs):
+ if kwargs.get('shortname') is None:
+ raise ValueError('shortname of check must be specified')
+
+ # Use sensible defaults if they're not specified - these are not
+ # actually used during removal, but they're required for constructing
+ # the Check object; check_disk is chosen because it's part of the
+ # nagios-plugins-basic package.
+ if kwargs.get('check_cmd') is None:
+ kwargs['check_cmd'] = 'check_disk'
+ if kwargs.get('description') is None:
+ kwargs['description'] = ''
+
+ check = Check(*args, **kwargs)
+ check.remove(self.hostname)
+
+ def write(self):
+ try:
+ nagios_uid = pwd.getpwnam('nagios').pw_uid
+ nagios_gid = grp.getgrnam('nagios').gr_gid
+ except:
+ log("Nagios user not set up, nrpe checks not updated")
+ return
+
+ if not os.path.exists(NRPE.nagios_logdir):
+ os.mkdir(NRPE.nagios_logdir)
+ os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
+ nrpe_monitors = {}
+ monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+ for nrpecheck in self.checks:
+ nrpecheck.write(self.nagios_context, self.hostname,
+ self.nagios_servicegroups)
+ nrpe_monitors[nrpecheck.shortname] = {
+ "command": nrpecheck.command,
+ }
+
+ service('restart', 'nagios-nrpe-server')
+
+ monitor_ids = relation_ids("local-monitors") + \
+ relation_ids("nrpe-external-master")
+ for rid in monitor_ids:
+ relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_host_context
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_host_context' in rel:
+ return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+ """
+ Query relation with nrpe subordinate, return the nagios_hostname
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ for rel in relations_of_type(relation_name):
+ if 'nagios_hostname' in rel:
+ return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+ """
+ Return the nagios unit name prepended with host_context if needed
+
+ :param str relation_name: Name of relation nrpe sub joined to
+ """
+ host_context = get_nagios_hostcontext(relation_name)
+ if host_context:
+ unit = "%s:%s" % (host_context, local_unit())
+ else:
+ unit = local_unit()
+ return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name):
+ """
+ Add checks for each service in list
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param list services: List of services to check
+ :param str unit_name: Unit name to use in check description
+ """
+ for svc in services:
+ upstart_init = '/etc/init/%s.conf' % svc
+ sysv_init = '/etc/init.d/%s' % svc
+ if os.path.exists(upstart_init):
+ # Don't add a check for these services from neutron-gateway
+ if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_upstart_job %s' % svc
+ )
+ elif os.path.exists(sysv_init):
+ cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+ cron_file = ('*/5 * * * * root '
+ '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+ '-s /etc/init.d/%s status > '
+ '/var/lib/nagios/service-check-%s.txt\n' % (svc,
+ svc)
+ )
+ f = open(cronpath, 'w')
+ f.write(cron_file)
+ f.close()
+ nrpe.add_check(
+ shortname=svc,
+ description='process check {%s}' % unit_name,
+ check_cmd='check_status_file.py -f '
+ '/var/lib/nagios/service-check-%s.txt' % svc,
+ )
+
+
+def copy_nrpe_checks():
+ """
+ Copy the nrpe checks into place
+
+ """
+ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+ nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
+ 'charmhelpers', 'contrib', 'openstack',
+ 'files')
+
+ if not os.path.exists(NAGIOS_PLUGINS):
+ os.makedirs(NAGIOS_PLUGINS)
+ for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+ if os.path.isfile(fname):
+ shutil.copy2(fname,
+ os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+ """
+ Add checks for each service in list
+
+ :param NRPE nrpe: NRPE object to add check to
+ :param str unit_name: Unit name to use in check description
+ """
+ nrpe.add_check(
+ shortname='haproxy_servers',
+ description='Check HAProxy {%s}' % unit_name,
+ check_cmd='check_haproxy.sh')
+ nrpe.add_check(
+ shortname='haproxy_queue',
+ description='Check HAProxy queue depth {%s}' % unit_name,
+ check_cmd='check_haproxy_queue_depth.sh')
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 0000000..320961b
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,175 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+ volume-ephemeral:
+ type: boolean
+ default: true
+ description: >
+        If false, a volume is mounted as specified in "volume-map"
+ If true, ephemeral storage will be used, meaning that log data
+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
+ volume-map:
+ type: string
+ default: {}
+ description: >
+ YAML map of units to device names, e.g:
+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+ Service units will raise a configure-error if volume-ephemeral
+ is 'true' and no volume-map value is set. Use 'juju set' to set a
+ value and 'juju resolved' to complete configuration.
+
+Usage::
+
+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
+ from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+ stop_service('myservice')
+ def post_mount_hook():
+ start_service('myservice')
+
+ if __name__ == '__main__':
+ try:
+ configure_volume(before_change=pre_mount_hook,
+ after_change=post_mount_hook)
+ except VolumeConfigurationError:
+ log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
class VolumeConfigurationError(Exception):
    '''Volume configuration data is missing or invalid.

    Raised by get_config()/mount_volume()/unmount_volume() when the
    charm's volume settings cannot be applied.
    '''
    pass
+
+
def get_config():
    '''Gather and sanity-check volume configuration data.

    Reads the charm config and returns a dict with keys:

    - ``ephemeral``: bool, True when ephemeral storage was requested
    - ``device``: block device mapped to this unit, or None
    - ``mountpoint``: per-unit mount point under MOUNT_BASE

    Returns None when the configuration is inconsistent (a volume and
    ephemeral storage both requested, neither requested, or the
    volume-map YAML is unparsable).
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # Pre-initialise so a YAML parse failure below does not leave
    # volume_map unbound (previously a NameError at the isinstance check).
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)))
        errors = True

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for permanent storage but did not define volume ID
        # NOTE(review): the log text below describes the opposite case;
        # kept verbatim to preserve existing log output.
        hookenv.log('Ephemeral storage was requested, but there is no volume '
                    'defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
+
+
def mount_volume(config):
    '''Persistently mount config['device'] at config['mountpoint'].

    Creates the mount point when missing, remounts when something else
    is already mounted there, and raises VolumeConfigurationError when
    the mount point is not a directory or the mount itself fails.
    '''
    mountpoint = config['mountpoint']
    if not os.path.exists(mountpoint):
        host.mkdir(mountpoint)
    elif not os.path.isdir(mountpoint):
        hookenv.log('Not a directory: {}'.format(mountpoint))
        raise VolumeConfigurationError()
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    if not host.mount(config['device'], mountpoint, persist=True):
        raise VolumeConfigurationError()
+
+
def unmount_volume(config):
    '''Unmount config['mountpoint'] when mounted; raise on failure.'''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
+
+
def managed_mounts():
    '''Return a list of all mounted managed volumes.

    Materialised as a real list: on Python 3 ``filter()`` returns a
    one-shot iterator, which contradicts the documented contract and
    silently yields nothing when consumed twice.
    '''
    return [mount for mount in host.mounts()
            if mount[0].startswith(MOUNT_BASE)]
+
+
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.

    :raises VolumeConfigurationError: when get_config() reports the
        volume configuration as missing or inconsistent.
    '''

    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    if config['ephemeral']:
        # Ephemeral storage requested: make sure any previously managed
        # volume is unmounted before relying on machine-local disk.
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            mounts = dict(managed_mounts())
            if mounts.get(config['mountpoint']) != config['device']:
                # A different device is mounted at our mount point;
                # swap it for the configured one.
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py
new file mode 100644
index 0000000..b65d963
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py
@@ -0,0 +1,318 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module contains helpers to add and remove ufw rules.
+
+Examples:
+
+- open SSH port for subnet 10.0.3.0/24:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
+
+- open service by name as defined in /etc/services:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.service('ssh', 'open')
+
+- close service by port number:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.service('4949', 'close') # munin
+"""
+import re
+import os
+import subprocess
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.kernel import modprobe, is_module_loaded
+
+__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
+
+
class UFWError(Exception):
    '''Base error for failures driving the ufw firewall.'''
    pass
+
+
class UFWIPv6Error(UFWError):
    '''Raised when IPv6 firewall support is present but broken.'''
    pass
+
+
def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    status = subprocess.check_output(['ufw', 'status'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    # A 'Status: active' line on its own means the firewall is up.
    return bool(re.findall(r'^Status: active\n', status, re.M))
+
+
def is_ipv6_ok(soft_fail=False):
    """
    Check if IPv6 support is present and ip6tables functional

    :param soft_fail: If set to True and IPv6 support is broken, then reports
                      that the host doesn't have IPv6 support, otherwise a
                      UFWIPv6Error exception is raised.
    :returns: True if IPv6 is working, False otherwise
    """
    if not os.path.isdir('/proc/sys/net/ipv6'):
        # the system doesn't have IPv6
        return False

    if is_module_loaded('ip6_tables'):
        # the module is present :)
        return True

    # ip6tables support isn't complete; try to load the module.
    try:
        modprobe('ip6_tables')
        # great, we can load the module
        return True
    except subprocess.CalledProcessError as ex:
        hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                    level="WARN")
        # we are in a world where ip6tables isn't working
        if soft_fail:
            # so we inform that the machine doesn't have IPv6
            return False
        raise UFWIPv6Error("IPv6 firewall support broken")
+
+
def disable_ipv6():
    """
    Disable ufw IPv6 support in /etc/default/ufw
    """
    rc = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
                          '/etc/default/ufw'])
    if rc != 0:
        hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
        raise UFWError("Couldn't disable IPv6 support in ufw")
    hookenv.log('IPv6 support in ufw disabled', level='INFO')
+
+
def enable(soft_fail=False):
    """
    Enable ufw

    :param soft_fail: If set to True silently disables IPv6 support in ufw,
                      otherwise a UFWIPv6Error exception is raised when IP6
                      support is broken.
    :returns: True if ufw is successfully enabled
    """
    if is_enabled():
        return True

    if not is_ipv6_ok(soft_fail):
        disable_ipv6()

    output = subprocess.check_output(['ufw', 'enable'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    activated = re.findall(
        '^Firewall is active and enabled on system startup\n', output, re.M)
    if activated:
        hookenv.log("ufw enabled", level='INFO')
        return True
    hookenv.log("ufw couldn't be enabled", level='WARN')
    return False
+
+
def disable():
    """
    Disable ufw

    :returns: True if ufw is successfully disabled
    """
    if not is_enabled():
        return True

    output = subprocess.check_output(['ufw', 'disable'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    stopped = re.findall(r'^Firewall stopped and disabled on system startup\n',
                         output, re.M)
    if stopped:
        hookenv.log("ufw disabled", level='INFO')
        return True
    hookenv.log("ufw couldn't be disabled", level='WARN')
    return False
+
+
def default_policy(policy='deny', direction='incoming'):
    """
    Changes the default policy for traffic `direction`

    :param policy: allow, deny or reject
    :param direction: traffic direction, possible values: incoming, outgoing,
                      routed
    """
    if policy not in ('allow', 'deny', 'reject'):
        raise UFWError(('Unknown policy %s, valid values: '
                        'allow, deny, reject') % policy)

    if direction not in ('incoming', 'outgoing', 'routed'):
        raise UFWError(('Unknown direction %s, valid values: '
                        'incoming, outgoing, routed') % direction)

    output = subprocess.check_output(['ufw', 'default', policy, direction],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    changed = re.findall("^Default %s policy changed to '%s'\n"
                         % (direction, policy),
                         output, re.M)
    if changed:
        hookenv.log("ufw default policy for %s changed to %s"
                    % (direction, policy), level='INFO')
        return True
    hookenv.log("ufw couldn't change the default policy to %s for %s"
                % (policy, direction), level='WARN')
    return False
+
+
def modify_access(src, dst='any', port=None, proto=None, action='allow',
                  index=None):
    """
    Grant or revoke access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destination of the connection; set this when the machine
                has multiple IPs and only connections to one of them
                should be accepted.
    :param port: destination port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    if not is_enabled():
        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
        return

    if action == 'delete':
        cmd = ['ufw', 'delete', 'allow']
    elif index is not None:
        cmd = ['ufw', 'insert', str(index), action]
    else:
        cmd = ['ufw', action]

    if src is not None:
        cmd += ['from', src]

    if dst is not None:
        cmd += ['to', dst]

    if port is not None:
        cmd += ['port', str(port)]

    if proto is not None:
        cmd += ['proto', proto]

    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    # Capture stderr too: previously only stdout was piped, so the error
    # branch below always logged None in place of ufw's error output.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    hookenv.log(stdout, level='INFO')

    if p.returncode != 0:
        hookenv.log(stderr, level='ERROR')
        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
                                                              p.returncode),
                    level='ERROR')
+
+
def grant_access(src, dst='any', port=None, proto=None, index=None):
    """
    Grant access to an address or subnet.

    Thin wrapper around modify_access() with action='allow'.

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destination of the connection; set this when the machine
                has multiple IPs and only connections to one of them
                should be accepted.
    :param port: destination port
    :param proto: protocol (tcp or udp)
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='allow', index=index)
+
+
def revoke_access(src, dst='any', port=None, proto=None):
    """
    Revoke access to an address or subnet.

    Thin wrapper around modify_access() with action='delete'.

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destination of the connection; set this when the machine
                has multiple IPs and only connections to one of them
                should be accepted.
    :param port: destination port
    :param proto: protocol (tcp or udp)
    """
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='delete')
+
+
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a port
                 number.
    :param action: `open` or `close`
    :raises UFWError: if action is neither 'open' nor 'close'
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        # The valid values for this function are 'open'/'close' (the
        # underlying ufw subcommands are an implementation detail), so
        # report those to the caller rather than 'allow'/'delete'.
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py
new file mode 100644
index 0000000..c5efb16
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py
@@ -0,0 +1,40 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Templating using the python-jinja2 package.
+"""
+import six
+from charmhelpers.fetch import apt_install, apt_update
+try:
+ import jinja2
+except ImportError:
+ apt_update(fatal=True)
+ if six.PY3:
+ apt_install(["python3-jinja2"], fatal=True)
+ else:
+ apt_install(["python-jinja2"], fatal=True)
+ import jinja2
+
+
+DEFAULT_TEMPLATES_DIR = 'templates'
+
+
def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
    """Render the named Jinja2 template from template_dir with context."""
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    return environment.get_template(template_name).render(context)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py b/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py
new file mode 100644
index 0000000..0303c3f
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py
@@ -0,0 +1,607 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+'''
+The coordinator module allows you to use Juju's leadership feature to
+coordinate operations between units of a service.
+
+Behavior is defined in subclasses of coordinator.BaseCoordinator.
+One implementation is provided (coordinator.Serial), which allows an
+operation to be run on a single unit at a time, on a first come, first
+served basis. You can trivially define more complex behavior by
+subclassing BaseCoordinator or Serial.
+
+:author: Stuart Bishop <stuart.bishop@canonical.com>
+
+
+Services Framework Usage
+========================
+
+Ensure a peers relation is defined in metadata.yaml. Instantiate a
+BaseCoordinator subclass before invoking ServiceManager.manage().
+Ensure that ServiceManager.manage() is wired up to the leader-elected,
+leader-settings-changed, peers relation-changed and peers
+relation-departed hooks in addition to any other hooks you need, or your
+service will deadlock.
+
+Ensure calls to acquire() are guarded, so that locks are only requested
+when they are really needed (and thus hooks only triggered when necessary).
+Failing to do this and calling acquire() unconditionally will put your unit
+into a hook loop. Calls to granted() do not need to be guarded.
+
+For example::
+
+ from charmhelpers.core import hookenv, services
+ from charmhelpers import coordinator
+
+ def maybe_restart(servicename):
+ serial = coordinator.Serial()
+ if needs_restart():
+ serial.acquire('restart')
+ if serial.granted('restart'):
+ hookenv.service_restart(servicename)
+
+ services = [dict(service='servicename',
+ data_ready=[maybe_restart])]
+
+ if __name__ == '__main__':
+ _ = coordinator.Serial() # Must instantiate before manager.manage()
+ manager = services.ServiceManager(services)
+ manager.manage()
+
+
+You can implement a similar pattern using a decorator. If the lock has
+not been granted, an attempt to acquire() it will be made if the guard
+function returns True. If the lock has been granted, the decorated function
+is run as normal::
+
+ from charmhelpers.core import hookenv, services
+ from charmhelpers import coordinator
+
+    serial = coordinator.Serial()  # Global, instantiated on module import.
+
+ def needs_restart():
+ [ ... Introspect state. Return True if restart is needed ... ]
+
+ @serial.require('restart', needs_restart)
+ def maybe_restart(servicename):
+ hookenv.service_restart(servicename)
+
+ services = [dict(service='servicename',
+ data_ready=[maybe_restart])]
+
+ if __name__ == '__main__':
+ manager = services.ServiceManager(services)
+ manager.manage()
+
+
+Traditional Usage
+=================
+
+Ensure a peers relation is defined in metadata.yaml.
+
+If you are using charmhelpers.core.hookenv.Hooks, ensure that a
+BaseCoordinator subclass is instantiated before calling Hooks.execute.
+
+If you are not using charmhelpers.core.hookenv.Hooks, ensure
+that a BaseCoordinator subclass is instantiated and its handle()
+method called at the start of all your hooks.
+
+For example::
+
+ import sys
+ from charmhelpers.core import hookenv
+ from charmhelpers import coordinator
+
+ hooks = hookenv.Hooks()
+
+ def maybe_restart():
+ serial = coordinator.Serial()
+ if serial.granted('restart'):
+ hookenv.service_restart('myservice')
+
+ @hooks.hook
+ def config_changed():
+ update_config()
+ serial = coordinator.Serial()
+ if needs_restart():
+            serial.acquire('restart')
+ maybe_restart()
+
+ # Cluster hooks must be wired up.
+ @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
+ def cluster_relation_changed():
+ maybe_restart()
+
+ # Leader hooks must be wired up.
+ @hooks.hook('leader-elected', 'leader-settings-changed')
+ def leader_settings_changed():
+ maybe_restart()
+
+ [ ... repeat for *all* other hooks you are using ... ]
+
+ if __name__ == '__main__':
+ _ = coordinator.Serial() # Must instantiate before execute()
+ hooks.execute(sys.argv)
+
+
+You can also use the require decorator. If the lock has not been granted,
+an attempt to acquire() it will be made if the guard function returns True.
+If the lock has been granted, the decorated function is run as normal::
+
+ from charmhelpers.core import hookenv
+
+ hooks = hookenv.Hooks()
+ serial = coordinator.Serial() # Must instantiate before execute()
+
+ @require('restart', needs_restart)
+ def maybe_restart():
+ hookenv.service_restart('myservice')
+
+ @hooks.hook('install', 'config-changed', 'upgrade-charm',
+ # Peers and leader hooks must be wired up.
+ 'cluster-relation-changed', 'cluster-relation-departed',
+ 'leader-elected', 'leader-settings-changed')
+ def default_hook():
+ [...]
+ maybe_restart()
+
+ if __name__ == '__main__':
+ hooks.execute()
+
+
+Details
+=======
+
+A simple API is provided similar to traditional locking APIs. A lock
+may be requested using the acquire() method, and the granted() method
+may be used to check if a lock previously requested by acquire() has
+been granted. It doesn't matter how many times acquire() is called in a
+hook.
+
+Locks are released at the end of the hook they are acquired in. This may
+be the current hook if the unit is leader and the lock is free. It is
+more likely a future hook (probably leader-settings-changed, possibly
+the peers relation-changed or departed hook, potentially any hook).
+
+Whenever a charm needs to perform a coordinated action it will acquire()
+the lock and perform the action immediately if acquisition is
+successful. It will also need to perform the same action in every other
+hook if the lock has been granted.
+
+
+Grubby Details
+--------------
+
+Why do you need to be able to perform the same action in every hook?
+If the unit is the leader, then it may be able to grant its own lock
+and perform the action immediately in the source hook. If the unit is
+the leader and cannot immediately grant the lock, then its only
+guaranteed chance of acquiring the lock is in the peers relation-joined,
+relation-changed or peers relation-departed hooks when another unit has
+released it (the only channel to communicate to the leader is the peers
+relation). If the unit is not the leader, then it is unlikely the lock
+is granted in the source hook (a previous hook must have also made the
+request for this to happen). A non-leader is notified about the lock via
+leader settings. These changes may be visible in any hook, even before
+the leader-settings-changed hook has been invoked. Or the requesting
+unit may be promoted to leader after making a request, in which case the
+lock may be granted in leader-elected or in a future peers
+relation-changed or relation-departed hook.
+
+This could be simpler if leader-settings-changed was invoked on the
+leader. We could then never grant locks except in
+leader-settings-changed hooks giving one place for the operation to be
+performed. Unfortunately this is not the case with Juju 1.23 leadership.
+
+But of course, this doesn't really matter to most people as most people
+seem to prefer the Services Framework or similar reset-the-world
+approaches, rather than the twisty maze of attempting to deduce what
+should be done based on what hook happens to be running (which always
+seems to evolve into reset-the-world anyway when the charm grows beyond
+the trivial).
+
+I chose not to implement a callback model, where a callback was passed
+to acquire to be executed when the lock is granted, because the callback
+may become invalid between making the request and the lock being granted
+due to an upgrade-charm being run in the interim. And it would create
+restrictions, such no lambdas, callback defined at the top level of a
+module, etc. Still, we could implement it on top of what is here, eg.
+by adding a defer decorator that stores a pickle of itself to disk and
+have BaseCoordinator unpickle and execute them when the locks are granted.
+'''
+from datetime import datetime
+from functools import wraps
+import json
+import os.path
+
+from six import with_metaclass
+
+from charmhelpers.core import hookenv
+
+
+# We make BaseCoordinator and subclasses singletons, so that if we
+# need to spill to local storage then only a single instance does so,
+# rather than having multiple instances stomp over each other.
class Singleton(type):
    """Metaclass giving each class a single shared instance.

    The first call constructs and caches the instance; subsequent calls
    return the cached one (later constructor arguments are ignored).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return cls._instances[cls]
+
+
+class BaseCoordinator(with_metaclass(Singleton, object)):
+ relid = None # Peer relation-id, set by __init__
+ relname = None
+
+ grants = None # self.grants[unit][lock] == timestamp
+ requests = None # self.requests[unit][lock] == timestamp
+
    def __init__(self, relation_key='coordinator', peer_relation_name=None):
        '''Instantiate a Coordinator.

        Data is stored on the peers relation and in leadership storage
        under the provided relation_key.

        The peers relation is identified by peer_relation_name, and defaults
        to the first one found in metadata.yaml.
        '''
        # Most initialization is deferred, since invoking hook tools from
        # the constructor makes testing hard.
        self.key = relation_key
        self.relname = peer_relation_name
        hookenv.atstart(self.initialize)

        # Ensure that handle() is called, without placing that burden on
        # the charm author. They still need to do this manually if they
        # are not using a hook framework.
        hookenv.atstart(self.handle)
+
    def initialize(self):
        '''Deferred setup: resolve the peer relation and load state.

        Idempotent; scheduled via hookenv.atstart() so it runs once at
        the beginning of each hook.
        '''
        if self.requests is not None:
            return  # Already initialized.

        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'

        if self.relname is None:
            self.relname = _implicit_peer_relation_name()

        relids = hookenv.relation_ids(self.relname)
        if relids:
            self.relid = sorted(relids)[0]

        # Load our state, from leadership, the peer relationship, and maybe
        # local state as a fallback. Populates self.requests and self.grants.
        self._load_state()
        self._emit_state()

        # Save our state if the hook completes successfully.
        hookenv.atexit(self._save_state)

        # Schedule release of granted locks for the end of the hook.
        # This needs to be the last of our atexit callbacks to ensure
        # it will be run first when the hook is complete, because there
        # is no point mutating our state after it has been saved.
        hookenv.atexit(self._release_granted)
+
+ def acquire(self, lock):
+ '''Acquire the named lock, non-blocking.
+
+ The lock may be granted immediately, or in a future hook.
+
+ Returns True if the lock has been granted. The lock will be
+ automatically released at the end of the hook in which it is
+ granted.
+
+ Do not mindlessly call this method, as it triggers a cascade of
+ hooks. For example, if you call acquire() every time in your
+ peers relation-changed hook you will end up with an infinite loop
+ of hooks. It should almost always be guarded by some condition.
+ '''
+ unit = hookenv.local_unit()
+ ts = self.requests[unit].get(lock)
+ if not ts:
+ # If there is no outstanding request on the peers relation,
+ # create one.
+ self.requests.setdefault(lock, {})
+ self.requests[unit][lock] = _timestamp()
+ self.msg('Requested {}'.format(lock))
+
+ # If the leader has granted the lock, yay.
+ if self.granted(lock):
+ self.msg('Acquired {}'.format(lock))
+ return True
+
+ # If the unit making the request also happens to be the
+ # leader, it must handle the request now. Even though the
+ # request has been stored on the peers relation, the peers
+ # relation-changed hook will not be triggered.
+ if hookenv.is_leader():
+ return self.grant(lock, unit)
+
+ return False # Can't acquire lock, yet. Maybe next hook.
+
+ def granted(self, lock):
+ '''Return True if a previously requested lock has been granted'''
+ unit = hookenv.local_unit()
+ ts = self.requests[unit].get(lock)
+ if ts and self.grants.get(unit, {}).get(lock) == ts:
+ return True
+ return False
+
+ def requested(self, lock):
+ '''Return True if we are in the queue for the lock'''
+ return lock in self.requests[hookenv.local_unit()]
+
+ def request_timestamp(self, lock):
+ '''Return the timestamp of our outstanding request for lock, or None.
+
+ Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
+ '''
+ ts = self.requests[hookenv.local_unit()].get(lock, None)
+ if ts is not None:
+ return datetime.strptime(ts, _timestamp_format)
+
    def handle(self):
        '''Leader bookkeeping: revoke stale grants, then grant requests.

        No-op on non-leader units. Scheduled via hookenv.atstart() by
        __init__ so it runs at the start of every hook.
        '''
        if not hookenv.is_leader():
            return  # Only the leader can grant requests.

        self.msg('Leader handling coordinator requests')

        # Clear our grants that have been released.
        for unit in self.grants.keys():
            for lock, grant_ts in list(self.grants[unit].items()):
                req_ts = self.requests.get(unit, {}).get(lock)
                if req_ts != grant_ts:
                    # The request timestamp does not match the granted
                    # timestamp. Several hooks on 'unit' may have run
                    # before the leader got a chance to make a decision,
                    # and 'unit' may have released its lock and attempted
                    # to reacquire it. This will change the timestamp,
                    # and we correctly revoke the old grant putting it
                    # to the end of the queue.
                    ts = datetime.strptime(self.grants[unit][lock],
                                           _timestamp_format)
                    del self.grants[unit][lock]
                    self.released(unit, lock, ts)

        # Grant locks
        for unit in self.requests.keys():
            for lock in self.requests[unit]:
                self.grant(lock, unit)
+
    def grant(self, lock, unit):
        '''Maybe grant the lock to a unit.

        The decision to grant the lock or not is made for $lock
        by a corresponding method grant_$lock, which you may define
        in a subclass. If no such method is defined, the default_grant
        method is used. See Serial.default_grant() for details.
        '''
        if not hookenv.is_leader():
            return False  # Not the leader, so we cannot grant.

        # Set of units already granted the lock.
        granted = set()
        for u in self.grants:
            if lock in self.grants[u]:
                granted.add(u)
        if unit in granted:
            return True  # Already granted.

        # Ordered list of units waiting for the lock.
        reqs = set()
        for u in self.requests:
            if u in granted:
                continue  # In the granted set. Not wanted in the req list.
            for l, ts in self.requests[u].items():
                if l == lock:
                    reqs.add((ts, u))
        # Sorting (timestamp, unit) pairs yields a FIFO queue; the unit
        # name breaks ties between identical timestamps.
        queue = [t[1] for t in sorted(reqs)]
        if unit not in queue:
            return False  # Unit has not requested the lock.

        # Locate custom logic, or fallback to the default.
        grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)

        if grant_func(lock, unit, granted, queue):
            # Grant the lock: record the request timestamp so granted()
            # on 'unit' can match it against its own request.
            self.msg('Leader grants {} to {}'.format(lock, unit))
            self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
            return True

        return False
+
+ def released(self, unit, lock, timestamp):
+ '''Called on the leader when it has released a lock.
+
+ By default, does nothing but log messages. Override if you
+ need to perform additional housekeeping when a lock is released,
+ for example recording timestamps.
+ '''
+ interval = _utcnow() - timestamp
+ self.msg('Leader released {} from {}, held {}'.format(lock, unit,
+ interval))
+
+ def require(self, lock, guard_func, *guard_args, **guard_kw):
+ """Decorate a function to be run only when a lock is acquired.
+
+ The lock is requested if the guard function returns True.
+
+ The decorated function is called if the lock has been granted.
+ """
+ def decorator(f):
+ @wraps(f)
+ def wrapper(*args, **kw):
+ if self.granted(lock):
+ self.msg('Granted {}'.format(lock))
+ return f(*args, **kw)
+ if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
+ return f(*args, **kw)
+ return None
+ return wrapper
+ return decorator
+
+ def msg(self, msg):
+ '''Emit a message. Override to customize log spam.'''
+ hookenv.log('coordinator.{} {}'.format(self._name(), msg),
+ level=hookenv.INFO)
+
+ def _name(self):
+ return self.__class__.__name__
+
+ def _load_state(self):
+ self.msg('Loading state'.format(self._name()))
+
+ # All responses must be stored in the leadership settings.
+ # The leader cannot use local state, as a different unit may
+ # be leader next time. Which is fine, as the leadership
+ # settings are always available.
+ self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
+
+ local_unit = hookenv.local_unit()
+
+ # All requests must be stored on the peers relation. This is
+ # the only channel units have to communicate with the leader.
+ # Even the leader needs to store its requests here, as a
+ # different unit may be leader by the time the request can be
+ # granted.
+ if self.relid is None:
+ # The peers relation is not available. Maybe we are early in
+ # the units's lifecycle. Maybe this unit is standalone.
+ # Fallback to using local state.
+ self.msg('No peer relation. Loading local state')
+ self.requests = {local_unit: self._load_local_state()}
+ else:
+ self.requests = self._load_peer_state()
+ if local_unit not in self.requests:
+ # The peers relation has just been joined. Update any state
+ # loaded from our peers with our local state.
+ self.msg('New peer relation. Merging local state')
+ self.requests[local_unit] = self._load_local_state()
+
+ def _emit_state(self):
+ # Emit this units lock status.
+ for lock in sorted(self.requests[hookenv.local_unit()].keys()):
+ if self.granted(lock):
+ self.msg('Granted {}'.format(lock))
+ else:
+ self.msg('Waiting on {}'.format(lock))
+
+ def _save_state(self):
+ self.msg('Publishing state'.format(self._name()))
+ if hookenv.is_leader():
+ # sort_keys to ensure stability.
+ raw = json.dumps(self.grants, sort_keys=True)
+ hookenv.leader_set({self.key: raw})
+
+ local_unit = hookenv.local_unit()
+
+ if self.relid is None:
+ # No peers relation yet. Fallback to local state.
+ self.msg('No peer relation. Saving local state')
+ self._save_local_state(self.requests[local_unit])
+ else:
+ # sort_keys to ensure stability.
+ raw = json.dumps(self.requests[local_unit], sort_keys=True)
+ hookenv.relation_set(self.relid, relation_settings={self.key: raw})
+
+ def _load_peer_state(self):
+ requests = {}
+ units = set(hookenv.related_units(self.relid))
+ units.add(hookenv.local_unit())
+ for unit in units:
+ raw = hookenv.relation_get(self.key, unit, self.relid)
+ if raw:
+ requests[unit] = json.loads(raw)
+ return requests
+
+ def _local_state_filename(self):
+ # Include the class name. We allow multiple BaseCoordinator
+ # subclasses to be instantiated, and they are singletons, so
+ # this avoids conflicts (unless someone creates and uses two
+ # BaseCoordinator subclasses with the same class name, so don't
+ # do that).
+ return '.charmhelpers.coordinator.{}'.format(self._name())
+
+ def _load_local_state(self):
+ fn = self._local_state_filename()
+ if os.path.exists(fn):
+ with open(fn, 'r') as f:
+ return json.load(f)
+ return {}
+
+ def _save_local_state(self, state):
+ fn = self._local_state_filename()
+ with open(fn, 'w') as f:
+ json.dump(state, f)
+
+ def _release_granted(self):
+ # At the end of every hook, release all locks granted to
+ # this unit. If a hook neglects to make use of what it
+ # requested, it will just have to make the request again.
+ # Implicit release is the only way this will work, as
+ # if the unit is standalone there may be no future triggers
+ # called to do a manual release.
+ unit = hookenv.local_unit()
+ for lock in list(self.requests[unit].keys()):
+ if self.granted(lock):
+ self.msg('Released local {} lock'.format(lock))
+ del self.requests[unit][lock]
+
+
class Serial(BaseCoordinator):
    def default_grant(self, lock, unit, granted, queue):
        '''Default logic to grant a lock to a unit. Unless overridden,
        only one unit may hold the lock and it will be granted to the
        earliest queued request.

        To define custom logic for $lock, create a subclass and
        define a grant_$lock method.

        `unit` is the unit name making the request.

        `granted` is the set of units already granted the lock. It will
        never include `unit`. It may be empty.

        `queue` is the list of units waiting for the lock, ordered by time
        of request. It will always include `unit`, but `unit` is not
        necessarily first.

        Returns True if the lock should be granted to `unit`.
        '''
        # Exclusive lock: nobody may currently hold it, and the
        # requester must be at the head of the FIFO queue.
        if granted:
            return False
        return queue[0] == unit
+
+
def _implicit_peer_relation_name():
    '''Return the alphabetically first peer relation from metadata.yaml.'''
    md = hookenv.metadata()
    assert 'peers' in md, 'No peer relations in metadata.yaml'
    peer_names = sorted(md['peers'].keys())
    return peer_names[0]
+
+
+# A human readable, sortable UTC timestamp format.
+_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
+
+
+def _utcnow(): # pragma: no cover
+ # This wrapper exists as mocking datetime methods is problematic.
+ return datetime.utcnow()
+
+
+def _timestamp():
+ return _utcnow().strftime(_timestamp_format)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py b/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py
new file mode 100644
index 0000000..bb05620
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py
@@ -0,0 +1,57 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+# Edward Hope-Morley <opentastic@gmail.com>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+
+
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raise the exception.

    :param num_retries: retry attempts allowed after the first failure
        before the exception is finally re-raised.
    :param base_delay: seconds to sleep between attempts; the delay grows
        linearly (base_delay * attempt number). 0 disables sleeping.
    :param exc_type: exception class (or tuple of classes) that triggers a
        retry; any other exception propagates immediately.
    """
    # Local import so the module's dependency list is unchanged.
    from functools import wraps

    def _retry_on_exception_inner_1(f):
        # Fix: preserve f's __name__/__doc__ on the wrapper; without
        # wraps() the decorated function's identity was lost.
        @wraps(f)
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/files.py b/charms/trusty/cassandra/hooks/charmhelpers/core/files.py
new file mode 100644
index 0000000..0f12d32
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/files.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
+
+import os
+import subprocess
+
+
def sed(filename, before, after, flags='g'):
    """
    Search and replaces the given pattern on filename.

    :param filename: relative or absolute file path.
    :param before: expression to be replaced (see 'man sed')
    :param after: expression to replace with (see 'man sed')
    :param flags: sed-compatible regex flags in example, to make
       the search and replace case insensitive, specify ``flags="i"``.
       The ``g`` flag is always specified regardless, so you do not
       need to remember to include it when overriding this parameter.
    :returns: If the sed command exit code was zero then return,
       otherwise raise CalledProcessError.
    """
    # Build the in-place sed script; the caller's expressions are passed
    # through verbatim, so they must already be sed-escaped.
    script = r's/{0}/{1}/{2}'.format(before, after, flags)
    target = os.path.expanduser(filename)
    command = ["sed", "-i", "-r", "-e", script, target]
    return subprocess.check_call(command)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py b/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..3056fba
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            # d and p are the dump and fsck-pass columns of fstab(5).
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # fstab requires a non-empty options column; 'defaults' is the
            # conventional no-op value.
            if not options:
                options = "defaults"

            self.options = options
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            # Entries compare equal when their rendered fstab lines match.
            # NOTE(review): no __hash__ is defined alongside __eq__, so
            # under Python 3 Entry instances are unhashable — confirm no
            # caller puts them in sets/dict keys.
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Default location of the system fstab.
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        # Open read/write in binary mode; the file must already exist.
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        """Parse one fstab line into an Entry; raises ValueError (via
        Entry.__init__/int) on malformed numeric columns."""
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every parseable, non-comment line."""
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if line.strip() and not line.strip().startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Skip malformed lines rather than failing the whole scan.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose `attr` equals `value`, or None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append `entry`; return it, or False if the device already exists."""
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching `entry`; return True if it was found."""
        self.seek(0)

        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
            if line.strip() and not line.strip().startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the remaining lines and drop the now-stale tail.
        self.seek(0)
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint`; True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Append a new entry built from the given fields."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py
new file mode 100644
index 0000000..3912e6e
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py
@@ -0,0 +1,1026 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"Interactions with the Juju environment"
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+# Charm Helpers Developers <juju@lists.ubuntu.com>
+
+from __future__ import print_function
+import copy
+from distutils.version import LooseVersion
+from functools import wraps
+import glob
+import os
+import json
+import yaml
+import socket
+import subprocess
+import sys
+import errno
+import tempfile
+from subprocess import CalledProcessError
+
+import six
+if not six.PY3:
+ from UserDict import UserDict
+else:
+ from collections import UserDict
+
# Log level names understood by `juju-log -l`.
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
# Unique sentinel for "attribute missing" (None may be a legitimate value).
MARKER = object()

# Process-wide memoization store used by @cached and flush().
cache = {}
+
+
def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        # The key is the repr of (func, args, kwargs); good enough for
        # the hook-local lifetime of this cache.
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    # Expose the undecorated function for callers that must bypass caching.
    wrapper._wrapped = func
    return wrapper
+
+
def flush(key):
    """Flushes any entries from function cache where the
    key is found in the function+args """
    # Collect first, then delete: the cache must not change size while
    # being iterated.
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
+
+
def log(message, level=None):
    """Write a message to the juju log"""
    command = ['juju-log']
    if level:
        command += ['-l', level]
    # juju-log only accepts strings; repr() anything else.
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # juju-log binary not found: fall back to stderr so tests
            # and standalone runs still see the message.
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
+
+
class Serializable(UserDict):
    """Wrapper, an object that can be serialized to yaml or json"""

    def __init__(self, obj):
        # wrap the object
        UserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object. MARKER distinguishes
        # "missing" from a genuine None value.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state

    def json(self):
        """Serialize the object to json"""
        return json.dumps(self.data)

    def yaml(self):
        """Serialize the object to yaml"""
        return yaml.dump(self.data)
+
+
def execution_environment():
    """A convenient bundling of the current execution context"""
    context = {'conf': config()}
    # Relation details are only available inside a relation hook.
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
+
+
def in_relation_hook():
    """Determine whether we're running in a relation hook"""
    # Juju sets JUJU_RELATION only for relation-* hooks.
    return os.environ.get('JUJU_RELATION') is not None
+
+
def relation_type():
    """The scope for the current relation hook"""
    scope = os.environ.get('JUJU_RELATION')
    return scope
+
+
@cached
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation"""
    have_name = bool(relation_name)
    have_target = bool(service_or_unit)
    if not have_name and not have_target:
        # No arguments: use the id of the relation hook we are in.
        return os.environ.get('JUJU_RELATION_ID', None)
    if have_name and have_target:
        # Look up the relation joining us to the named service/unit.
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            if remote_service_name(relid) == service_name:
                return relid
        return None
    raise ValueError('Must specify neither or both of relation_name and service_or_unit')
+
+
def local_unit():
    """Local unit ID"""
    # Always set by juju for a running hook; a KeyError here means we
    # are not executing under juju at all.
    unit_name = os.environ['JUJU_UNIT_NAME']
    return unit_name
+
+
def remote_unit():
    """The remote unit for the current relation hook"""
    # Only present inside relation hooks; None otherwise.
    return os.environ.get('JUJU_REMOTE_UNIT', None)
+
+
def service_name():
    """The name service group this unit belongs to"""
    # A unit id looks like 'service/0'; the service is the first part.
    unit = local_unit()
    return unit.split('/', 1)[0]
+
+
@cached
def remote_service_name(relid=None):
    """The remote service name for a given relation-id (or the current relation)"""
    if relid is None:
        unit = remote_unit()
    else:
        members = related_units(relid)
        unit = members[0] if members else None
    if not unit:
        return None
    return unit.split('/')[0]
+
+
def hook_name():
    """The name of the currently executing hook"""
    # Prefer the env var (set by modern juju); fall back to the script name.
    default = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', default)
+
+
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        # When True, save() runs automatically at the end of the hook.
        self.implicit_save = True
        # Snapshot of the previous hook's config; backs changed()/previous().
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()
        # NOTE(review): `atexit` here must be hookenv's own helper (the
        # stdlib atexit *module* is not callable) — confirm it is defined
        # later in this module.
        atexit(self._implicit_save)

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
        # Keys saved by an earlier hook but absent from the current
        # config are carried forward (deep-copied so mutation of self
        # does not corrupt the _prev_dict snapshot).
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        # With no previous snapshot every key counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        with open(self.path, 'w') as f:
            json.dump(self, f)

    def _implicit_save(self):
        # Hook-exit callback registered in __init__.
        if self.implicit_save:
            self.save()
+
+
@cached
def config(scope=None):
    """Juju charm configuration"""
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        raw = subprocess.check_output(cmd).decode('UTF-8')
        config_data = json.loads(raw)
        if scope is not None:
            # A single value was requested: return it as-is.
            return config_data
        # Whole config: wrap it so previous/changed tracking works.
        return Config(config_data)
    except ValueError:
        return None
+
+
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.extend(['-r', rid])
    # '-' asks relation-get for the full settings map.
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        raw = subprocess.check_output(_args).decode('UTF-8')
        return json.loads(raw)
    except ValueError:
        return None
    except CalledProcessError as e:
        # Exit code 2 means the relation (or unit) no longer exists.
        if e.returncode == 2:
            return None
        raise
+
+
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit"""
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    # Probe the tool's help text: --file support appeared in Juju 1.23.2.
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    settings = relation_settings.copy()
    settings.update(kwargs)
    for key, value in settings.items():
        # Force value to be a string: it always should, but some call
        # sites pass in things like dicts or numbers.
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
        # --file was introduced in Juju 1.23.2. Use it by default if
        # available, since otherwise we'll break if the relation data is
        # too big. Ideally we should tell relation-set to read the data from
        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
        subprocess.check_call(
            relation_cmd_line + ["--file", settings_file.name])
        os.remove(settings_file.name)
    else:
        # Older juju: pass each setting as key=value on the command line.
        for key, value in settings.items():
            if value is None:
                relation_cmd_line.append('{}='.format(key))
            else:
                relation_cmd_line.append('{}={}'.format(key, value))
        subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
+
+
def relation_clear(r_id=None):
    ''' Clears any relation data already set on relation r_id '''
    settings = relation_get(rid=r_id, unit=local_unit())
    # Null out everything except the addresses juju manages itself.
    cleared = {}
    for setting, value in settings.items():
        if setting in ('public-address', 'private-address'):
            cleared[setting] = value
        else:
            cleared[setting] = None
    relation_set(relation_id=r_id, **cleared)
+
+
@cached
def relation_ids(reltype=None):
    """A list of relation_ids"""
    reltype = reltype or relation_type()
    if reltype is None:
        # Not in a relation hook and no type given: nothing to list.
        return []
    relid_cmd_line = ['relation-ids', '--format=json', reltype]
    raw = subprocess.check_output(relid_cmd_line).decode('UTF-8')
    return json.loads(raw) or []
+
+
@cached
def related_units(relid=None):
    """A list of related units"""
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line += ['-r', relid]
    raw = subprocess.check_output(units_cmd_line).decode('UTF-8')
    return json.loads(raw) or []
+
+
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the JSON representation of a unit's relation"""
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    # Values for '-list' keys are whitespace-separated lists by convention.
    list_keys = [key for key in relation if key.endswith('-list')]
    for key in list_keys:
        relation[key] = relation[key].split()
    relation['__unit__'] = unit
    return relation
+
+
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): relation_ids() returns a *list*, yet it is used here
    # as the default for a single relid — looks suspect; confirm callers
    # always pass relid explicitly.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
+
+
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type"""
    reltype = reltype or relation_type()
    result = []
    for relid in relation_ids(reltype):
        members = relations_for_id(relid)
        for member in members:
            member['__relid__'] = relid
        result.extend(members)
    return result
+
+
@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object"""
    path = os.path.join(charm_dir(), 'metadata.yaml')
    with open(path) as md:
        return yaml.safe_load(md)
+
+
@cached
def relation_types():
    """Get a list of relation types supported by this charm"""
    md = metadata()
    rel_types = []
    for section_name in ('provides', 'requires', 'peers'):
        section = md.get(section_name)
        if section:
            rel_types.extend(section.keys())
    return rel_types
+
+
@cached
def peer_relation_id():
    '''Get the peers relation id if a peers relation has been joined, else None.'''
    section = metadata().get('peers')
    if not section:
        return None
    for key in section:
        relids = relation_ids(key)
        if relids:
            return relids[0]
    return None
+
+
@cached
def relation_to_interface(relation_name):
    """
    Given the name of a relation, return the interface that relation uses.

    :returns: The interface name, or ``None``.
    """
    _, interface = relation_to_role_and_interface(relation_name)
    return interface
+
+
@cached
def relation_to_role_and_interface(relation_name):
    """
    Given the name of a relation, return the role and the name of the interface
    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).

    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
    """
    md = metadata()
    for role in ('provides', 'requires', 'peers'):
        section = md.get(role, {})
        interface = section.get(relation_name, {}).get('interface')
        if interface:
            return role, interface
    return None, None
+
+
@cached
def role_and_interface_to_relations(role, interface_name):
    """
    Given a role and interface name, return a list of relation names for the
    current charm that use that interface under that role (where role is one
    of ``provides``, ``requires``, or ``peers``).

    :returns: A list of relation names.
    """
    section = metadata().get(role, {})
    return [name for name, relation in section.items()
            if relation['interface'] == interface_name]
+
+
@cached
def interface_to_relations(interface_name):
    """
    Given an interface, return a list of relation names for the current
    charm that use that interface.

    :returns: A list of relation names.
    """
    results = []
    for role in ('provides', 'requires', 'peers'):
        results += role_and_interface_to_relations(role, interface_name)
    return results
+
+
@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    md = metadata()
    return md.get('name')
+
+
@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    # Shape: {relation type: {relation id: {unit name: settings dict}}}
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Include our own settings alongside every remote unit's.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            relids[relid] = units
        rels[reltype] = relids
    return rels
+
+
@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s). If a list of keys is provided, they
    must all be present for the relation to be identified as made.
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            # The relation counts as "made" only when every requested key
            # is set on at least one remote unit.
            values = [relation_get(k, rid=r_id, unit=unit) for k in keys]
            if all(v is not None for v in values):
                return True
    return False
+
+
+def open_port(port, protocol="TCP"):
+ """Open a service network port"""
+ _args = ['open-port']
+ _args.append('{}/{}'.format(port, protocol))
+ subprocess.check_call(_args)
+
+
+def close_port(port, protocol="TCP"):
+ """Close a service network port"""
+ _args = ['close-port']
+ _args.append('{}/{}'.format(port, protocol))
+ subprocess.check_call(_args)
+
+
@cached
def unit_get(attribute):
    """Query the unit-get hook tool for an attribute of this unit.

    Returns ``None`` when the tool's output is not valid JSON.
    """
    raw = subprocess.check_output(['unit-get', '--format=json', attribute])
    try:
        return json.loads(raw.decode('UTF-8'))
    except ValueError:
        return None
+
+
+def unit_public_ip():
+ """Get this unit's public IP address"""
+ return _ensure_ip(unit_get('public-address'))
+
+
+def unit_private_ip():
+ """Get this unit's private IP address"""
+ return _ensure_ip(unit_get('private-address'))
+
+
+def _ensure_ip(addr):
+ """If addr is a hostname, resolve it to an IP address"""
+ if not addr:
+ return None
+ # We need to use socket.getaddrinfo for IPv6 support.
+ info = socket.getaddrinfo(addr, None)
+ if info is None:
+ # Should never happen
+ raise ValueError("Invalid result None from getaddinfo")
+ try:
+ return info[0][4][0]
+ except IndexError:
+ # Should never happen
+ raise ValueError("Invalid result {!r} from getaddinfo".format(info))
+
+
+@cached
+def storage_get(attribute=None, storage_id=None):
+ """Get storage attributes"""
+ _args = ['storage-get', '--format=json']
+ if storage_id:
+ _args.extend(('-s', storage_id))
+ if attribute:
+ _args.append(attribute)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
@cached
def storage_list(storage_name=None):
    """List the storage IDs for the unit via the storage-list hook tool."""
    import errno
    args = ['storage-list', '--format=json']
    if storage_name:
        args.append(storage_name)
    try:
        return json.loads(subprocess.check_output(args).decode('UTF-8'))
    except ValueError:
        # Output was not JSON.
        return None
    except OSError as e:
        if e.errno == errno.ENOENT:
            # storage-list does not exist (older Juju)
            return []
        raise
+
+
class UnregisteredHookError(Exception):
    """Raised by :meth:`Hooks.execute` when no handler is registered for
    the hook name derived from ``args[0]``."""
    pass
+
+
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=None):
        super(Hooks, self).__init__()
        self._hooks = {}

        # For unknown reasons, we allow the Hooks constructor to override
        # config().implicit_save.
        if config_save is not None:
            config().implicit_save = config_save

    def register(self, name, function):
        """Register *function* as the handler for hook *name*."""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0].

        Runs the ``atstart`` callbacks first; on success (including a
        clean ``sys.exit(0)``) runs the ``atexit`` callbacks.

        :raises UnregisteredHookError: if args[0] names no registered hook.
        """
        _run_atstart()
        hook_name = os.path.basename(args[0])
        if hook_name not in self._hooks:
            raise UnregisteredHookError(hook_name)
        try:
            self._hooks[hook_name]()
        except SystemExit as x:
            # A clean sys.exit() still counts as success for atexit purposes.
            if x.code is None or x.code == 0:
                _run_atexit()
            raise
        _run_atexit()

    def hook(self, *hook_names):
        """Decorator, registering the decorated function as a hook.

        The function is registered under each name in *hook_names* AND,
        unconditionally, under its own function name plus a dashed variant
        of it (hook files conventionally use ``-`` where Python uses ``_``).
        """
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)
            # The original used ``for ... else`` here; with no ``break`` in
            # the loop the else-branch always ran, so these registrations
            # are unconditional. Flattened for clarity — behavior unchanged.
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
+
+
def charm_dir():
    """Return the root directory of the current charm.

    Reads the ``CHARM_DIR`` environment variable Juju sets for hooks;
    ``None`` when it is unset.
    """
    return os.environ.get('CHARM_DIR', None)
+
+
+@cached
+def action_get(key=None):
+ """Gets the value of an action parameter, or all key/value param pairs"""
+ cmd = ['action-get']
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return action_data
+
+
+def action_set(values):
+ """Sets the values to be returned after the action finishes"""
+ cmd = ['action-set']
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+def action_fail(message):
+ """Sets the action status to failed and sets the error message.
+
+ The results set by action_set are preserved."""
+ subprocess.check_call(['action-fail', message])
+
+
def action_name():
    """Return the name of the currently executing action, or ``None``."""
    return os.environ.get('JUJU_ACTION_NAME', None)
+
+
def action_uuid():
    """Return the UUID of the currently executing action, or ``None``."""
    return os.environ.get('JUJU_ACTION_UUID', None)
+
+
def action_tag():
    """Return the tag of the currently executing action, or ``None``."""
    return os.environ.get('JUJU_ACTION_TAG', None)
+
+
def status_set(workload_state, message):
    """Set the workload state with a message.

    Uses status-set to make the state and message visible in ``juju
    status``; when the status-set tool is missing (Juju < 1.23) the
    message is juju-logged instead.

    workload_state -- valid juju workload state.
    message -- status update message
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    try:
        if subprocess.call(['status-set', workload_state, message]) == 0:
            return
    except OSError as e:
        # Only a missing status-set binary is tolerated.
        if e.errno != errno.ENOENT:
            raise
    log('status-set failed: {} {}'.format(workload_state, message),
        level='INFO')
+
+
def status_get():
    """Retrieve the previously set juju workload state and message.

    If the status-get command is not found then assume this is juju < 1.23
    and return ('unknown', "").
    """
    cmd = ['status-get', "--format=json", "--include-data"]
    try:
        raw_status = subprocess.check_output(cmd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        return ('unknown', "")
    status = json.loads(raw_status.decode("UTF-8"))
    return (status["status"], status["message"])
+
+
def translate_exc(from_exc, to_exc):
    """Decorator factory: re-raise *from_exc* raised by the wrapped
    function as *to_exc*."""
    def decorator(f):
        @wraps(f)
        def inner(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except from_exc:
                raise to_exc
        return inner
    return decorator
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+ """Does the current unit hold the juju leadership
+
+ Uses juju to determine whether the current unit is the leader of its peers
+ """
+ cmd = ['is-leader', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+ """Juju leader get value(s)"""
+ cmd = ['leader-get', '--format=json'] + [attribute or '-']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
    """Juju leader set value(s).

    Key/value pairs may be passed as a dict and/or as keyword arguments;
    a value of ``None`` unsets the key.

    :raises NotImplementedError: if the leader-set tool is unavailable.
    """
    # Don't log secrets.
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
    cmd = ['leader-set']
    # Copy before merging kwargs so we never mutate the caller's dict
    # (the original updated the passed-in dictionary in place).
    settings = dict(settings or {})
    settings.update(kwargs)
    for k, v in settings.items():
        if v is None:
            cmd.append('{}='.format(k))
        else:
            cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+ """ is used while a hook is running to let Juju know that a
+ payload has been started."""
+ cmd = ['payload-register']
+ for x in [ptype, klass, pid]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+ """ is used while a hook is running to let Juju know
+ that a payload has been manually stopped. The <class> and <id> provided
+ must match a payload that has been previously registered with juju using
+ payload-register."""
+ cmd = ['payload-unregister']
+ for x in [klass, pid]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+ """is used to update the current status of a registered payload.
+ The <class> and <id> provided must match a payload that has been previously
+ registered with juju using payload-register. The <status> must be one of the
+ follow: starting, started, stopping, stopped"""
+ cmd = ['payload-status-set']
+ for x in [klass, pid, status]:
+ cmd.append(x)
+ subprocess.check_call(cmd)
+
+
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def resource_get(name):
    """Fetch the local path of the named charm resource.

    <name> must match a name of defined resource in metadata.yaml

    returns either a path or False if resource not available
    """
    if not name:
        return False
    try:
        return subprocess.check_output(
            ['resource-get', name]).decode('UTF-8')
    except subprocess.CalledProcessError:
        # resource-get exits non-zero when the resource is unavailable.
        return False
+
+
+@cached
+def juju_version():
+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+ return subprocess.check_output([jujud, 'version'],
+ universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+ """Return True if the Juju version is at least the provided version"""
+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+ '''Schedule a callback to run before the main hook.
+
+ Callbacks are run in the order they were added.
+
+ This is useful for modules and classes to perform initialization
+ and inject behavior. In particular:
+
+ - Run common code before all of your hooks, such as logging
+ the hook name or interesting relation data.
+ - Defer object or module initialization that requires a hook
+ context until we know there actually is a hook context,
+ making testing easier.
+ - Rather than requiring charm authors to include boilerplate to
+ invoke your helper's behavior, have it run automatically if
+ your object is instantiated or module imported.
+
+ This is not at all useful after your hook framework as been launched.
+ '''
+ global _atstart
+ _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+ '''Schedule a callback to run on successful hook completion.
+
+ Callbacks are run in the reverse order that they were added.'''
+ _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+ '''Hook frameworks must invoke this before running the main hook body.'''
+ global _atstart
+ for callback, args, kwargs in _atstart:
+ callback(*args, **kwargs)
+ del _atstart[:]
+
+
+def _run_atexit():
+ '''Hook frameworks must invoke this after the main hook body has
+ successfully completed. Do not invoke it if the hook fails.'''
+ global _atexit
+ for callback, args, kwargs in reversed(_atexit):
+ callback(*args, **kwargs)
+ del _atexit[:]
+
+
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
    '''
    Retrieve the primary network address for a named binding

    :param binding: string. The name of a relation of extra-binding
    :return: string. The primary IP address for the named binding
    :raise: NotImplementedError if run on Juju < 2.0
    '''
    cmd = ['network-get', '--primary-address', binding]
    # Decode so callers get str on Python 3, consistent with every other
    # hook-tool wrapper in this module (the original returned bytes).
    return subprocess.check_output(cmd).decode('UTF-8').strip()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/host.py b/charms/trusty/cassandra/hooks/charmhelpers/core/host.py
new file mode 100644
index 0000000..481087b
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/host.py
@@ -0,0 +1,695 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Nick Moffitt <nick.moffitt@canonical.com>
+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+from contextlib import contextmanager
+from collections import OrderedDict
+
+import six
+
+from .hookenv import log
+from .fstab import Fstab
+
+
+def service_start(service_name):
+ """Start a system service"""
+ return service('start', service_name)
+
+
+def service_stop(service_name):
+ """Stop a system service"""
+ return service('stop', service_name)
+
+
+def service_restart(service_name):
+ """Restart a system service"""
+ return service('restart', service_name)
+
+
+def service_reload(service_name, restart_on_failure=False):
+ """Reload a system service, optionally falling back to restart if
+ reload fails"""
+ service_result = service('reload', service_name)
+ if not service_result and restart_on_failure:
+ service_result = service('restart', service_name)
+ return service_result
+
+
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
    """Pause a system service.

    Stop it, and prevent it from starting again at boot.

    :param str service_name: the service to pause
    :param str init_dir: directory containing Upstart ``.conf`` files
    :param str initd_dir: directory containing SysV init scripts
    :returns: True if the service was stopped (or was not running at all)
    :raises ValueError: if no known init system owns the service
    """
    stopped = True
    if service_running(service_name):
        stopped = service_stop(service_name)
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        service('disable', service_name)
    elif os.path.exists(upstart_file):
        # Upstart honours a "manual" stanza in an .override file, which
        # prevents the job from being started automatically.
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        with open(override_path, 'w') as fh:
            fh.write("manual\n")
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, upstart_file, sysv_file))
    return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+ initd_dir="/etc/init.d"):
+ """Resume a system service.
+
+ Reenable starting again at boot. Start the service"""
+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+ sysv_file = os.path.join(initd_dir, service_name)
+ if init_is_systemd():
+ service('enable', service_name)
+ elif os.path.exists(upstart_file):
+ override_path = os.path.join(
+ init_dir, '{}.override'.format(service_name))
+ if os.path.exists(override_path):
+ os.unlink(override_path)
+ elif os.path.exists(sysv_file):
+ subprocess.check_call(["update-rc.d", service_name, "enable"])
+ else:
+ raise ValueError(
+ "Unable to detect {0} as SystemD, Upstart {1} or"
+ " SysV {2}".format(
+ service_name, upstart_file, sysv_file))
+
+ started = service_running(service_name)
+ if not started:
+ started = service_start(service_name)
+ return started
+
+
def service(action, service_name):
    """Run an init-system action against a service.

    Returns True when the underlying command exits successfully.
    """
    cmd = (['systemctl', action, service_name] if init_is_systemd()
           else ['service', service_name, action])
    return subprocess.call(cmd) == 0
+
+
def service_running(service_name):
    """Determine whether a system service is currently running."""
    if init_is_systemd():
        return service('is-active', service_name)
    try:
        output = subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    # Upstart and various SysV scripts phrase a healthy status differently.
    markers = ("start/running", "is running", "up and running")
    return any(marker in output for marker in markers)
+
+
+def service_available(service_name):
+ """Determine whether a system service is available"""
+ try:
+ subprocess.check_output(
+ ['service', service_name, 'status'],
+ stderr=subprocess.STDOUT).decode('UTF-8')
+ except subprocess.CalledProcessError as e:
+ return b'unrecognized service' not in e.output
+ else:
+ return True
+
+
SYSTEMD_SYSTEM = '/run/systemd/system'


def init_is_systemd():
    """Return True if the host's init system is systemd.

    systemd creates /run/systemd/system at boot, so checking for that
    directory is the canonical runtime test.
    """
    return os.path.isdir(SYSTEMD_SYSTEM)
+
+
def adduser(username, password=None, shell='/bin/bash', system_user=False,
            primary_group=None, secondary_groups=None):
    """Add a user to the system.

    Will log but otherwise succeed if the user already exists.

    :param str username: Username to create
    :param str password: Password for user; if ``None``, create a system user
    :param str shell: The default shell for the user
    :param bool system_user: Whether to create a login or system user
    :param str primary_group: Primary group for user; defaults to username
    :param list secondary_groups: Optional list of additional groups

    :returns: The password database entry struct, as returned by `pwd.getpwnam`
    """
    try:
        # getpwnam raises KeyError when the user is absent.
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            # System accounts: no home directory, no login password.
            cmd.append('--system')
        else:
            # NOTE(review): useradd's --password expects a crypt(3)-hashed
            # value; a plaintext password here is stored verbatim — confirm
            # callers pass a hash.
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        if not primary_group:
            try:
                grp.getgrnam(username)
                primary_group = username  # avoid "group exists" error
            except KeyError:
                pass
        if primary_group:
            cmd.extend(['-g', primary_group])
        if secondary_groups:
            cmd.extend(['-G', ','.join(secondary_groups)])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
+
+
def user_exists(username):
    """Return True if *username* exists in the password database."""
    try:
        pwd.getpwnam(username)
    except KeyError:
        return False
    return True
+
+
+def add_group(group_name, system_group=False):
+ """Add a group to the system"""
+ try:
+ group_info = grp.getgrnam(group_name)
+ log('group {0} already exists!'.format(group_name))
+ except KeyError:
+ log('creating group {0}'.format(group_name))
+ cmd = ['addgroup']
+ if system_group:
+ cmd.append('--system')
+ else:
+ cmd.extend([
+ '--group',
+ ])
+ cmd.append(group_name)
+ subprocess.check_call(cmd)
+ group_info = grp.getgrnam(group_name)
+ return group_info
+
+
+def add_user_to_group(username, group):
+ """Add a user to a group"""
+ cmd = ['gpasswd', '-a', username, group]
+ log("Adding user {} to group {}".format(username, group))
+ subprocess.check_call(cmd)
+
+
+def rsync(from_path, to_path, flags='-r', options=None):
+ """Replicate the contents of a path"""
+ options = options or ['--delete', '--executability']
+ cmd = ['/usr/bin/rsync', flags]
+ cmd.extend(options)
+ cmd.append(from_path)
+ cmd.append(to_path)
+ log(" ".join(cmd))
+ return subprocess.check_output(cmd).decode('UTF-8').strip()
+
+
+def symlink(source, destination):
+ """Create a symbolic link"""
+ log("Symlinking {} as {}".format(source, destination))
+ cmd = [
+ 'ln',
+ '-sf',
+ source,
+ destination,
+ ]
+ subprocess.check_call(cmd)
+
+
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
+ """Create a directory"""
+ log("Making dir {} {}:{} {:o}".format(path, owner, group,
+ perms))
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ realpath = os.path.abspath(path)
+ path_exists = os.path.exists(realpath)
+ if path_exists and force:
+ if not os.path.isdir(realpath):
+ log("Removing non-directory file {} prior to mkdir()".format(path))
+ os.unlink(realpath)
+ os.makedirs(realpath, perms)
+ elif not path_exists:
+ os.makedirs(realpath, perms)
+ os.chown(realpath, uid, gid)
+ os.chmod(realpath, perms)
+
+
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string."""
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    with open(path, 'wb') as out:
        # Set ownership/permissions on the open fd before writing data.
        fd = out.fileno()
        os.fchown(fd, uid, gid)
        os.fchmod(fd, perms)
        out.write(content)
+
+
+def fstab_remove(mp):
+ """Remove the given mountpoint entry from /etc/fstab"""
+ return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+ """Adds the given device entry to the /etc/fstab file"""
+ return Fstab.add(dev, mp, fs, options=options)
+
+
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint"""
    cmd = ['mount']
    if options is not None:
        cmd.extend(['-o', options])
    cmd.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    # Optionally record the mount in /etc/fstab so it survives reboot.
    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
+
+
def umount(mountpoint, persist=False):
    """Unmount a filesystem"""
    try:
        subprocess.check_output(['umount', mountpoint])
    except subprocess.CalledProcessError as e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False
    # Optionally drop the entry from /etc/fstab as well.
    if persist:
        return fstab_remove(mountpoint)
    return True
+
+
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    system_mounts = []
    with open('/proc/mounts') as f:
        for line in f:
            fields = line.strip().split()
            # /proc/mounts columns are: device mountpoint fstype opts ...
            system_mounts.append([fields[1], fields[0]])
    return system_mounts
+
+
def fstab_mount(mountpoint):
    """Mount a filesystem using its /etc/fstab entry.

    :param mountpoint: the mount point to pass to ``mount``.
    :returns: True on success, False (after logging) on failure.
    """
    cmd_args = ['mount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        # Fixed message: it previously said "unmounting" (copy-pasted
        # from umount) even though this function mounts.
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
        return False
    return True
+
+
def file_hash(path, hash_type='md5'):
    """Return a checksum of the contents of *path*, or None if missing.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    """
    if not os.path.exists(path):
        return None
    digest = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        digest.update(source.read())
    return digest.hexdigest()
+
+
+def path_hash(path):
+ """Generate a hash checksum of all files matching 'path'. Standard
+ wildcards like '*' and '?' are supported, see documentation for the 'glob'
+ module for more information.
+
+ :return: dict: A { filename: hash } dictionary for all matched files.
+ Empty if none found.
+ """
+ return {
+ filename: file_hash(filename)
+ for filename in glob.iglob(path)
+ }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+ """Validate a file using a cryptographic checksum.
+
+ :param str checksum: Value of the checksum used to validate the file.
+ :param str hash_type: Hash algorithm used to generate `checksum`.
+ Can be any hash alrgorithm supported by :mod:`hashlib`,
+ such as md5, sha1, sha256, sha512, etc.
+ :raises ChecksumError: If the file fails the checksum
+
+ """
+ actual_checksum = file_hash(path, hash_type)
+ if checksum != actual_checksum:
+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+ """A class derived from Value error to indicate the checksum failed."""
+ pass
+
+
def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

    This function is used a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
            '/etc/apache/sites-enabled/*': [ 'apache2' ]
        })
        def config_changed():
            pass  # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function. The apache2 service would be
    restarted if any file matching the pattern got changed, created
    or removed. Standard wildcards are supported, see documentation
    for the 'glob' module for more information.

    @param restart_map: {path_file_name: [service_name, ...]}
    @param stopstart: DEFAULT false; whether to stop, start OR restart
    @returns result from decorated function
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            # Delegate to the helper so the checksum/restart logic is
            # shared with other call sites.
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)), restart_map, stopstart)
        return wrapped_f
    return wrap
+
+
def restart_on_change_helper(lambda_f, restart_map, stopstart=False):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @returns result of lambda_f()
    """
    # Snapshot checksums of every watched path BEFORE running lambda_f,
    # then compare afterwards to find what changed.
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # create a list of lists of the services to restart
    restarts = [restart_map[path]
                for path in restart_map
                if path_hash(path) != checksums[path]]
    # create a flat list of ordered services without duplicates from lists
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        # Either a stop pass followed by a start pass, or one restart pass.
        actions = ('stop', 'start') if stopstart else ('restart',)
        for action in actions:
            for service_name in services_list:
                service(action, service_name)
    return r
+
+
def lsb_release():
    """Return the key/value pairs from /etc/lsb-release as a dict."""
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            # Split on the first '=' only: values such as
            # DISTRIB_DESCRIPTION may themselves contain '='.
            k, v = line.split('=', 1)
            d[k.strip()] = v.strip()
    return d
+
+
def pwgen(length=None):
    """Generate a random password.

    :param length: password length; when None a random length in [35, 44]
        is chosen (a weak PRNG is acceptable for picking the length).
    """
    if length is None:
        # A random length is ok to use a weak PRNG
        length = random.choice(range(35, 45))
    # Exclude easily-confused characters and vowels.
    alphanumeric_chars = [
        c for c in (string.ascii_letters + string.digits)
        if c not in 'l0QD1vAEIOUaeiou']
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
    # actual password
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphanumeric_chars) for _ in range(length))
+
+
+def is_phy_iface(interface):
+ """Returns True if interface is not virtual, otherwise False."""
+ if interface:
+ sys_net = '/sys/class/net'
+ if os.path.isdir(sys_net):
+ for iface in glob.glob(os.path.join(sys_net, '*')):
+ if '/virtual/' in os.path.realpath(iface):
+ continue
+
+ if interface == os.path.basename(iface):
+ return True
+
+ return False
+
+
def get_bond_master(interface):
    """Return the bond master when *interface* is a bond slave, else None.

    NOTE: the provided interface is expected to be physical
    """
    if not interface:
        return None
    iface_path = '/sys/class/net/%s' % (interface)
    if not os.path.exists(iface_path):
        return None
    if '/virtual/' in os.path.realpath(iface_path):
        # Virtual devices cannot be bond slaves for our purposes.
        return None
    master = os.path.join(iface_path, 'master')
    if not os.path.exists(master):
        return None
    master = os.path.realpath(master)
    # make sure it is a bond master
    if os.path.exists(os.path.join(master, 'bonding')):
        return os.path.basename(master)
    return None
+
+
def list_nics(nic_type=None):
    """Return a list of nics of given type(s).

    :param nic_type: a name prefix (e.g. 'eth') or list of prefixes; when
        None, every interface reported by ``ip a`` is returned.
    :returns: list of interface names (any '@parent' suffix stripped).
    """
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type

    interfaces = []
    if nic_type:
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
            ip_output = ip_output.split('\n')
            ip_output = (line for line in ip_output if line)
            for line in ip_output:
                if line.split()[1].startswith(int_type):
                    matched = re.search('.*: (' + int_type +
                                        r'[0-9]+\.[0-9]+)@.*', line)
                    if matched:
                        iface = matched.groups()[0]
                    else:
                        iface = line.split()[1].replace(":", "")

                    if iface not in interfaces:
                        interfaces.append(iface)
    else:
        cmd = ['ip', 'a']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line.strip() for line in ip_output if line)

        # Raw string literal: '\s' in a plain string is an invalid escape
        # sequence (DeprecationWarning/SyntaxWarning on modern Python).
        key = re.compile(r'^[0-9]+:\s+(.+):')
        for line in ip_output:
            matched = re.search(key, line)
            if matched:
                iface = matched.group(1)
                iface = iface.partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)

    return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+ """Set the Maximum Transmission Unit (MTU) on a network interface."""
+ cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+ subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+ """Return the Maximum Transmission Unit (MTU) for a network interface."""
+ cmd = ['ip', 'addr', 'show', nic]
+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+ mtu = ""
+ for line in ip_output:
+ words = line.split()
+ if 'mtu' in words:
+ mtu = words[words.index("mtu") + 1]
+ return mtu
+
+
+def get_nic_hwaddr(nic):
+ """Return the Media Access Control (MAC) for a network interface."""
+ cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
+ hwaddr = ""
+ words = ip_output.split()
+ if 'link/ether' in words:
+ hwaddr = words[words.index('link/ether') + 1]
+ return hwaddr
+
+
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    # Imported lazily: apt_pkg is only available on Debian/Ubuntu hosts.
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    # Raises KeyError if *package* is not installed/known to the cache.
    pkg = pkgcache[package]
    # NOTE(review): apt's version_compare is documented to return <0/0/>0,
    # not strictly -1/0/1 — callers should compare the sign. TODO confirm.
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
@contextmanager
def chdir(directory):
    """Context manager: execute the block with *directory* as the current
    working directory, restoring the previous one afterwards (even when
    the block raises).

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    try:
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+ """Recursively change user and group ownership of files and directories
+ in given path. Doesn't chown path itself by default, only its children.
+
+ :param str path: The string path to start changing ownership.
+ :param str owner: The owner string to use when looking up the uid.
+ :param str group: The group string to use when looking up the gid.
+ :param bool follow_links: Also Chown links if True
+ :param bool chowntopdir: Also chown path itself if True
+ """
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ if follow_links:
+ chown = os.chown
+ else:
+ chown = os.lchown
+
+ if chowntopdir:
+ broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+ if not broken_symlink:
+ chown(path, uid, gid)
+ for root, dirs, files in os.walk(path):
+ for name in dirs + files:
+ full = os.path.join(root, name)
+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+ if not broken_symlink:
+ chown(full, uid, gid)
+
+
+def lchownr(path, owner, group):
+ """Recursively change user and group ownership of files and directories
+ in a given path, not following symbolic links. See the documentation for
+ 'os.lchown' for more information.
+
+ :param str path: The string path to start changing ownership.
+ :param str owner: The owner string to use when looking up the uid.
+ :param str group: The group string to use when looking up the gid.
+ """
+ chownr(path, owner, group, follow_links=False)
+
+
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f.readlines():
            if not line:
                continue
            key, value, unit = line.split()
            if key == 'MemTotal:':
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic, not KiB.
    raise NotImplementedError()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000..a783ad9
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+ add_group,
+ add_user_to_group,
+ fstab_mount,
+ mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
        user (str)  -- Username to allow access to hugepages to
        group (str) -- Group name to own hugepages
        nr_hugepages (int) -- Number of pages to reserve
        max_map_count (int) -- Number of Virtual Memory Areas a process can own
        mnt_point (str) -- Directory to mount hugepages on
        pagesize (str) -- Size of hugepages
        mount (bool) -- Whether to Mount hugepages
        set_shmmax (bool) -- Raise kernel.shmmax if smaller than the reserved pages
    """
    # Create (or fetch) the group and grant the user access to it.
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Keep the VMA limit at a minimum of twice the reserved page count.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        # Only ever raise shmmax, never lower an already-larger value.
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    # Persist via sysctl.d and apply immediately (sysctl.create runs sysctl -p).
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    # Replace any stale fstab entry for this mount point before adding ours.
    lfstab = fstab.Fstab()
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    # mode=1770 restricts use of the mount to members of the hugepage group.
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py b/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py
new file mode 100644
index 0000000..5dc6495
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO
+)
+
+from subprocess import check_call, check_output
+import re
+
+
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot.

    :param str module: Name of the kernel module to load.
    :param bool persist: If True, append the module to /etc/modules so it
        is loaded automatically at boot.
    :raises subprocess.CalledProcessError: if modprobe exits non-zero.
    """
    cmd = ['modprobe', module]

    log('Loading kernel module %s' % module, level=INFO)

    check_call(cmd)
    if persist:
        with open('/etc/modules', 'r+') as modules:
            if module not in modules.read():
                # /etc/modules is one module name per line; the original
                # wrote the bare name with no newline, which fuses the entry
                # onto the previous line when the file lacks a trailing
                # newline and leaves no separator for later additions.
                modules.write(module + "\n")
+
+
def rmmod(module, force=False):
    """Unload a kernel module.

    :param str module: Name of the module to remove.
    :param bool force: Pass ``-f`` to rmmod to force removal.
    :returns: the return value of ``check_call`` (0 on success).
    :raises subprocess.CalledProcessError: if rmmod exits non-zero.
    """
    cmd = ['rmmod', '-f', module] if force else ['rmmod', module]
    log('Removing kernel module %s' % module, level=INFO)
    return check_call(cmd)
+
+
def lsmod():
    """Return the ``lsmod`` listing of currently loaded kernel modules."""
    output = check_output(['lsmod'], universal_newlines=True)
    return output
+
+
def is_module_loaded(module):
    """Return True if *module* appears at the start of a line in lsmod."""
    pattern = '^%s[ ]+' % module
    return bool(re.findall(pattern, lsmod(), re.M))
+
+
def update_initramfs(version='all'):
    """Regenerate the initramfs image(s) for *version* (default: all kernels)."""
    cmd = ["update-initramfs", "-k", version, "-u"]
    return check_call(cmd)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000..0928158
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from .base import * # NOQA
+from .helpers import * # NOQA
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py
new file mode 100644
index 0000000..a42660c
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py
@@ -0,0 +1,353 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+ 'service_restart', 'service_stop']
+
+
class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`.  Each callback will be called with the service name as the
        only parameter.  After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete.  Each callback will be called with the service
        name as the only parameter.  This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service.  If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage.  The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # JSON file persisting which services were data_ready across hooks.
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        # Lazily-loaded cache of that file; None until first access.
        self._ready = None
        # Ordered so callbacks fire in registration order.
        self.services = OrderedDict()
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hookenv._run_atstart()
        try:
            hook_name = hookenv.hook_name()
            if hook_name == 'stop':
                self.stop_services()
            else:
                self.reconfigure_services()
                self.provide_data()
        except SystemExit as x:
            # A clean sys.exit() (code None or 0) still runs the atexit
            # callbacks; a non-zero exit skips them.
            if x.code is None or x.code == 0:
                hookenv._run_atexit()
        # NOTE(review): on a clean SystemExit this is reached after the call
        # above — presumably _run_atexit drains its queue so the second call
        # is a no-op; confirm against hookenv.
        hookenv._run_atexit()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.

        The `provide_data()` method can optionally accept two parameters:

        * ``remote_service`` The name of the remote service that the data will
          be provided to.  The `provide_data()` method will be called once
          for each connected service (not unit).  This allows the method to
          tailor its data to the given service.
        * ``service_ready`` Whether or not the service definition had all of
          its requirements met, and thus the ``data_ready`` callbacks run.

        Note that the ``provided_data`` methods are now called **after** the
        ``data_ready`` callbacks are run.  This gives the ``data_ready`` callbacks
        a chance to generate any data necessary for the providing to the remote
        services.
        """
        for service_name, service in self.services.items():
            service_ready = self.is_ready(service_name)
            for provider in service.get('provided_data', []):
                for relid in hookenv.relation_ids(provider.name):
                    units = hookenv.related_units(relid)
                    if not units:
                        continue
                    # One call per connected service, keyed off its first unit.
                    remote_service = units[0].split('/')[0]
                    # Sniff the provider's arity to support both the old
                    # zero-argument and new two-argument signatures.
                    argspec = getargspec(provider.provide_data)
                    if len(argspec.args) > 1:
                        data = provider.provide_data(remote_service, service_ready)
                    else:
                        data = provider.provide_data()
                    if data:
                        hookenv.relation_set(relid, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire data_lost/stop if the service was previously up.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # A single callback is normalized to a one-element list.
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            # ManagerCallbacks get the manager and event; plain callbacks
            # only receive the service name.
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        # Cache hit: already loaded (or initialized) this hook invocation.
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        # Nothing to persist if the cache was never loaded/modified.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
+
+
class ManagerCallback(object):
    """Marker base class for callbacks that need the ``ServiceManager``.

    Plain service callbacks receive only the service name; instances of
    subclasses of this class are instead invoked as
    ``cb(manager, service_name, event_name)``, giving them access to the
    full service definition and the event ('data_ready', 'data_lost',
    'start' or 'stop') being handled.

    Subclasses must override :meth:`__call__`.
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: concrete callbacks supply the behaviour.
        raise NotImplementedError()
+
+
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.

    Ports opened on a previous invocation are remembered in a per-service
    dotfile in the charm directory, so ports removed from the service
    definition can be closed on the next run.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            # Close any previously-opened port no longer in the definition.
            for old_port in old_ports:
                if bool(old_port):  # skip empty strings from a blank file
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        # Record the current port set for the next invocation.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
+
+
def service_stop(service_name):
    """Stop *service_name* only if it is currently running.

    Guarding on ``host.service_running`` prevents spurious "unknown
    service" messages in the logs when the service is already down.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
+
+
def service_restart(service_name):
    """Restart *service_name*, or start it if it is not yet running.

    Checks ``host.service_available`` first to avoid spurious "unknown
    service" messages in the logs for services that do not exist.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000..2423704
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py
@@ -0,0 +1,292 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import yaml
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+ 'render_template', 'template']
+
+
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    # Relation name of interest; subclasses set this.
    name = None
    # Interface type of interest; subclasses set this.
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        # Default to no required keys when neither the subclass __init__
        # nor a class attribute provided any.
        if not hasattr(self, 'required_keys'):
            self.required_keys = []

        if name is not None:
            self.name = name
        if additional_required_keys:
            # NOTE(review): if a subclass declares required_keys as a class
            # attribute, this extend() mutates that shared list across all
            # instances — confirm subclasses assign it per-instance (as
            # MysqlRelation/HttpRelation do).
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    # Python 2 truth protocol.
    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        # get_data() only stores unit data that passed _is_ready(), so a
        # non-empty list under self.name implies readiness.
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                # Only complete unit data (all required_keys) is stored.
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
+
+
class MysqlRelation(RelationContext):
    """RelationContext for the ``mysql`` interface (relation name ``db``).

    A unit's relation data is considered complete once ``host``, ``user``,
    ``password`` and ``database`` are all present.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # Assign per-instance before delegating, so the base class does not
        # fall back to an empty required_keys list.
        self.required_keys = ['host', 'user', 'password', 'database']
        RelationContext.__init__(self, *args, **kwargs)
+
+
class HttpRelation(RelationContext):
    """RelationContext for the ``http`` interface (relation name ``website``).

    A unit's relation data is considered complete once ``host`` and
    ``port`` are both present.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        # Assign per-instance before delegating to the base class.
        self.required_keys = ['host', 'port']
        RelationContext.__init__(self, *args, **kwargs)

    def provide_data(self):
        """Advertise this unit's private address on port 80."""
        data = {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
        return data
+
+
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        # Live option values, namespaced under 'config'.
        self['config'] = hookenv.config()
        # Option *declarations* (defaults etc.) from the charm's config.yaml.
        # safe_load: yaml.load without an explicit Loader can instantiate
        # arbitrary Python objects and is deprecated; plain data only here.
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """True only when every required option differs from its default."""
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # None and '' are treated as interchangeable "unset" values.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truth protocol delegates to __bool__.
        return self.__bool__()
+
+
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.

        :param str file_name: Path of the persisted context.
        :param dict config_data: Data to store on first creation.
        """
        # NOTE(review): this existence check uses file_name as given, while
        # store/read resolve relative paths against the charm dir — confirm
        # callers pass absolute paths.
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist `config_data` to `file_name` as YAML, readable only by owner."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # Tighten to 0600 before writing: the stored data may contain
            # secrets such as generated passwords.
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load and return the YAML data stored in `file_name`.

        :raises OSError: if the file exists but contains no data.
        """
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # safe_load: the persisted context must never instantiate
            # arbitrary Python objects (yaml.load is unsafe on such input).
            data = yaml.safe_load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
+
+
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`

    :param str target: The target to write the rendered template to (or None)
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    :param partial on_change_action: functools partial to be executed when
                                     rendered file changes
    :param template_loader: A jinja2 template loader to use instead of the
                            default charm templates directory
    :return str: The rendered template
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444,
                 on_change_action=None, template_loader=None):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms
        self.on_change_action = on_change_action
        self.template_loader = template_loader

    def __call__(self, manager, service_name, event_name):
        # Checksum the target before rendering so we can detect whether the
        # render actually changed the file.
        pre_checksum = ''
        if self.on_change_action and os.path.isfile(self.target):
            pre_checksum = host.file_hash(self.target)
        service = manager.get_service(service_name)
        # Flatten all required_data contexts into one template context; the
        # same data is also exposed un-flattened under the 'ctx' key.
        context = {'ctx': {}}
        for ctx in service.get('required_data', []):
            context.update(ctx)
            context['ctx'].update(ctx)

        result = templating.render(self.source, self.target, context,
                                   self.owner, self.group, self.perms,
                                   template_loader=self.template_loader)
        if self.on_change_action:
            if pre_checksum == host.file_hash(self.target):
                hookenv.log(
                    'No change detected: {}'.format(self.target),
                    hookenv.DEBUG)
            else:
                # Only run the change action when the file content changed.
                self.on_change_action()

        return result
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py b/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py
new file mode 100644
index 0000000..7e3f969
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+import re
+
+
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.

    :param str value: One of y/yes/true/t/on (True) or n/no/false/f/off
        (False), case-insensitive, surrounding whitespace ignored.
    :raises ValueError: for non-strings or unrecognized strings.
    """
    if not isinstance(value, six.string_types):
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    normalized = six.text_type(value).strip().lower()

    if normalized in ('y', 'yes', 'true', 't', 'on'):
        return True
    if normalized in ('n', 'no', 'false', 'f', 'off'):
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (normalized)
    raise ValueError(msg)
+
+
def bytes_from_string(value):
    """Interpret a human readable string (e.g. '1K', '512MB') as bytes.

    Suffixes K/KB through P/PB are supported and treated as binary
    multiples (1K == 1024 bytes).

    :param str value: Size string such as "2MB".
    :returns int: The size in bytes.
    :raises ValueError: if value is not a string or cannot be parsed.
    """
    BYTE_POWER = {
        'K': 1,
        'KB': 1,
        'M': 2,
        'MB': 2,
        'G': 3,
        'GB': 3,
        'T': 4,
        'TB': 4,
        'P': 5,
        'PB': 5,
    }
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        # Original message said "as boolean" -- a copy/paste slip from
        # bool_from_string; this function parses byte sizes.
        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
        raise ValueError(msg)
    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
    if not matches:
        msg = "Unable to interpret string value '%s' as bytes" % (value)
        raise ValueError(msg)
    try:
        power = BYTE_POWER[matches.group(2)]
    except KeyError:
        # Unknown suffix (e.g. "10X") previously leaked a bare KeyError;
        # surface it as the documented ValueError instead.
        msg = "Unable to interpret string value '%s' as bytes" % (value)
        raise ValueError(msg)
    return int(matches.group(1)) * (1024 ** power)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py b/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000..21cc8ab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        # Best effort: log and bail out rather than crash the hook on a
        # malformed settings string.
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    # One "key=value" per line — the format sysctl(8) expects.
    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    # Apply the newly written settings immediately.
    check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py b/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py
new file mode 100644
index 0000000..d2d8eaf
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py
@@ -0,0 +1,81 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute. It can also be `None`, in which
    case no file will be written.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    The rendered template will be written to the file as well as being returned
    as a string.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if template_loader:
        template_env = Environment(loader=template_loader)
    else:
        if templates_dir is None:
            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
        template_env = Environment(loader=FileSystemLoader(templates_dir))
    try:
        # Fixed: removed the redundant no-op statement `source = source`.
        template = template_env.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    if target is not None:
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            # This is a terrible default directory permission, as the file
            # or its siblings will often contain secrets.
            # Fixed: reuse target_dir instead of recomputing dirname(target).
            host.mkdir(target_dir, owner, group, perms=0o755)
        host.write_file(target, content.encode(encoding), owner, group, perms)
    return content
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py b/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000..338104e
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py
@@ -0,0 +1,521 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Authors:
+# Kapil Thangavelu <kapil.foss@gmail.com>
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic, one simple integration is
+via the HookData contextmanager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+setup a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+ from charmhelper.core import hookenv, unitdata
+
+ hook_data = unitdata.HookData()
+ db = unitdata.kv()
+ hooks = hookenv.Hooks()
+
+ @hooks.hook
+ def config_changed():
+ # Print all changes to configuration from previously seen
+ # values.
+ for changed, (prev, cur) in hook_data.conf.items():
+ print('config changed', changed,
+ 'previous value', prev,
+ 'current value', cur)
+
        # Get some unit specific bookkeeping
+ if not db.get('pkg_key'):
+ key = urllib.urlopen('https://example.com/pkg_key').read()
+ db.set('pkg_key', key)
+
+ # Directly access all charm config as a mapping.
+ conf = db.getrange('config', True)
+
+ # Directly access all relation data as a mapping
+ rels = db.getrange('rels', True)
+
+ if __name__ == '__main__':
+ with hook_data():
+ hook.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+ >>> from unitdata import kv
+ >>> db = kv()
+ >>> with db.hook_scope('install'):
+ ... # do work, in transactional scope.
+ ... db.set('x', 1)
+ >>> db.get('x')
+ 1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+ >>> kv.set('y', True)
+ >>> kv.get('y')
+ True
+
+ # We can set complex values (dicts, lists) as a single key.
+ >>> kv.set('config', {'a': 1, 'b': True'})
+
+ # Also supports returning dictionaries as a record which
+ # provides attribute access.
+ >>> config = kv.get('config', record=True)
+ >>> config.b
+ True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+ >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+ >>> kv.getrange('gui.', strip=True)
+ {'z': 1, 'y': 2}
+
When updating values, it's very helpful to understand which values
have actually changed and how they have changed. The storage
provides a delta method for this::
+
+ >>> data = {'debug': True, 'option': 2}
+ >>> delta = kv.delta(data, 'config.')
+ >>> delta.debug.previous
+ None
+ >>> delta.debug.current
+ True
+ >>> delta
+ {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change, it needs to
+be explicitly saved via 'update' method::
+
+ >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated to the hookname.
+
+ >>> with db.hook_scope('config-changed'):
+ ... db.set('x', 42)
+ >>> db.gethistory('x')
+ [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+ (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
+
+
class Storage(object):
    """Simple key value database for local unit state within charms.

    Modifications are not persisted unless :meth:`flush` is called.

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
    """
    def __init__(self, path=None):
        # Backing-store resolution order: explicit ``path`` argument, then
        # the UNIT_STATE_DB environment variable, then
        # $CHARM_DIR/.unit-state.db.
        self.db_path = path
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        # ``revision`` is non-None only inside hook_scope(); when set,
        # mutations are also recorded in the kv_revisions history table.
        self.revision = None
        self._closed = False
        self._init()

    def close(self):
        # Idempotent: roll back any uncommitted work, then release handles.
        if self._closed:
            return
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True

    def get(self, key, default=None, record=False):
        # Return the JSON-decoded value for ``key`` (``default`` when the key
        # is absent). With record=True, wrap the decoded value in Record for
        # attribute-style access.
        self.cursor.execute('select data from kv where key=?', [key])
        result = self.cursor.fetchone()
        if not result:
            return default
        if record:
            return Record(json.loads(result[0]))
        return json.loads(result[0])

    def getrange(self, key_prefix, strip=False):
        """
        Get a range of keys starting with a common prefix as a mapping of
        keys to values.

        :param str key_prefix: Common prefix among all keys
        :param bool strip: Optionally strip the common prefix from the key
            names in the returned dict
        :return dict: A (possibly empty) dict of key-value mappings
        """
        # NOTE(review): key_prefix is interpolated into a LIKE pattern, so
        # '%' or '_' inside it act as wildcards — confirm callers never use
        # those characters in prefixes.
        self.cursor.execute("select key, data from kv where key like ?",
                            ['%s%%' % key_prefix])
        result = self.cursor.fetchall()

        if not result:
            return {}
        if not strip:
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])

    def update(self, mapping, prefix=""):
        """
        Set the values of multiple keys at once.

        :param dict mapping: Mapping of keys to values
        :param str prefix: Optional prefix to apply to all keys in `mapping`
            before setting
        """
        for k, v in mapping.items():
            self.set("%s%s" % (prefix, k), v)

    def unset(self, key):
        """
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        # Only record history when inside a hook scope and a row was deleted.
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

    def unsetrange(self, keys=None, prefix=""):
        """
        Remove a range of keys starting with a common prefix, from the database
        entirely.

        :param list keys: List of keys to remove.
        :param str prefix: Optional prefix to apply to all keys in ``keys``
            before removing.
        """
        if keys is not None:
            # Explicit key list: delete them all in one parameterized IN().
            keys = ['%s%s' % (prefix, key) for key in keys]
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            # No key list: delete everything matching the prefix (LIKE).
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        """
        serialized = json.dumps(value)

        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save
        # Outside a hook scope there is no revision to attach history to.
        if not self.revision:
            return value

        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """
        return a delta containing values that have changed.

        Compares ``mapping`` against the currently stored keys under
        ``prefix`` and returns a DeltaSet of key -> Delta(previous, current)
        for added, removed and changed keys. Nothing is persisted here;
        callers must use :meth:`update` to save the new values.
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        # Nested scopes are not supported.
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except:
            # Bare except is deliberate: roll back and clear the revision on
            # *any* error (including KeyboardInterrupt) before re-raising.
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        # Commit when save is True; otherwise roll back. After close() the
        # connection is gone, so a rollback request becomes a no-op.
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        # Create the schema on first use and persist it immediately:
        # kv holds current values, kv_revisions the per-hook history,
        # hooks the hook-execution log that revisions point into.
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        # Return the historical (revision, key, data, hook, date) rows for
        # ``key``. With deserialize=True rows are decoded via _parse_history
        # (note: this is a lazy ``map`` object on Python 3).
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        return map(_parse_history, self.cursor.fetchall())

    def debug(self, fh=sys.stderr):
        # Dump both tables to ``fh`` for interactive debugging.
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
+
+
+def _parse_history(d):
+ return (d[0], d[1], json.loads(d[2]), d[3],
+ datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+
+
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

        from charmhelper.core import hookenv, unitdata

        changes = unitdata.HookData()
        db = unitdata.kv()
        hooks = hookenv.Hooks()

        @hooks.hook
        def config_changed():
            # View all changes to configuration
            for changed, (prev, cur) in changes.conf.items():
                print('config changed', changed,
                      'previous value', prev,
                      'current value', cur)

            # Get some unit specific bookkeeping
            if not db.get('pkg_key'):
                key = urllib.urlopen('https://example.com/pkg_key').read()
                db.set('pkg_key', key)

        if __name__ == '__main__':
            with changes():
                hooks.execute()

    """
    def __init__(self):
        self.kv = kv()
        # Populated by _record_hook() on each hook execution.
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        """Run the wrapped hook inside a storage transaction, yielding the
        kv store plus the config and relation deltas."""
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        # Fixed: use a context manager so the file handle is not leaked.
        with open(os.path.join(charm_dir, 'revision')) as rev_file:
            charm_rev = rev_file.read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        # Snapshot the execution environment: config/relation deltas against
        # the previously stored values, plus the raw env/unit/relid data.
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
+
+
class Record(dict):
    """A dict whose keys can also be read as attributes.

    Missing attributes raise AttributeError, matching normal object
    semantics.
    """

    __slots__ = ()

    def __getattr__(self, k):
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
+
+
class DeltaSet(Record):
    """Mapping of key -> Delta(previous, current), with attribute access
    inherited from Record; built by Storage.delta()."""

    __slots__ = ()
+
+
# (previous, current) value pair describing one key's change in a DeltaSet.
Delta = collections.namedtuple('Delta', ['previous', 'current'])


# Module-level Storage singleton; created lazily by kv().
_KV = None
+
+
def kv():
    """Return the shared :class:`Storage` instance, creating it on first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py
new file mode 100644
index 0000000..017a696
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py
@@ -0,0 +1,468 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import importlib
+from tempfile import NamedTemporaryFile
+import time
+from yaml import safe_load
+from charmhelpers.core.host import (
+ lsb_release
+)
+import subprocess
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+)
+import os
+
+import six
+if six.PY3:
+ from urllib.parse import urlparse, urlunparse
+else:
+ from urlparse import urlparse, urlunparse
+
+
# sources.list template for the Ubuntu Cloud Archive; {} is filled with the
# real pocket name, e.g. "trusty-updates/mitaka".
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
# sources.list template for the -proposed pocket; {} is the release codename.
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
# Map every user-facing alias accepted by "cloud:<pocket>" sources to the
# canonical Ubuntu Cloud Archive pocket it refers to.
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    'precise-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
    # Icehouse
    'icehouse': 'precise-updates/icehouse',
    'precise-icehouse': 'precise-updates/icehouse',
    'precise-icehouse/updates': 'precise-updates/icehouse',
    'precise-updates/icehouse': 'precise-updates/icehouse',
    'icehouse/proposed': 'precise-proposed/icehouse',
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
    # Juno
    'juno': 'trusty-updates/juno',
    'trusty-juno': 'trusty-updates/juno',
    'trusty-juno/updates': 'trusty-updates/juno',
    'trusty-updates/juno': 'trusty-updates/juno',
    'juno/proposed': 'trusty-proposed/juno',
    'trusty-juno/proposed': 'trusty-proposed/juno',
    'trusty-proposed/juno': 'trusty-proposed/juno',
    # Kilo
    'kilo': 'trusty-updates/kilo',
    'trusty-kilo': 'trusty-updates/kilo',
    'trusty-kilo/updates': 'trusty-updates/kilo',
    'trusty-updates/kilo': 'trusty-updates/kilo',
    'kilo/proposed': 'trusty-proposed/kilo',
    'trusty-kilo/proposed': 'trusty-proposed/kilo',
    'trusty-proposed/kilo': 'trusty-proposed/kilo',
    # Liberty
    'liberty': 'trusty-updates/liberty',
    'trusty-liberty': 'trusty-updates/liberty',
    'trusty-liberty/updates': 'trusty-updates/liberty',
    'trusty-updates/liberty': 'trusty-updates/liberty',
    'liberty/proposed': 'trusty-proposed/liberty',
    'trusty-liberty/proposed': 'trusty-proposed/liberty',
    'trusty-proposed/liberty': 'trusty-proposed/liberty',
    # Mitaka
    'mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka': 'trusty-updates/mitaka',
    'trusty-mitaka/updates': 'trusty-updates/mitaka',
    'trusty-updates/mitaka': 'trusty-updates/mitaka',
    'mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
}

# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
    'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+
+
class SourceConfigError(Exception):
    """Raised when a package source or key configuration is invalid
    (see add_source() and configure_sources())."""
    pass
+
+
class UnhandledSource(Exception):
    """Raised when no fetch handler can process a given source URL."""
    pass
+
+
class AptLockError(Exception):
    """Error acquiring the APT lock (defined for callers; not raised
    anywhere in this module)."""
    pass
+
+
class BaseFetchHandler(object):

    """Base class for FetchHandler implementations in fetch plugins"""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split ``url`` into its six URL components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        # The first four components (scheme, netloc, path, params) are kept;
        # query and fragment are dropped.
        components = self.parse_url(url)
        return urlunparse(components[:4] + ('', ''))
+
+
def filter_installed_packages(packages):
    """Returns a list of packages that require installation

    :param packages: iterable of package names to check
    :returns: list of the names that are not currently installed; unknown
        package names are included with a warning logged
    """
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
        else:
            # Fixed idiom: the original abused `p.current_ver or
            # _pkgs.append(package)` for its side effect.
            if not p.current_ver:
                # No installed version -> needs installation.
                _pkgs.append(package)
    return _pkgs
+
+
def apt_cache(in_memory=True):
    """Build and return an apt cache"""
    # Imported lazily so this module can be loaded on systems without
    # python-apt installed.
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        # Blank the cache file paths so apt keeps the cache in memory
        # instead of writing pkgcache/srcpkgcache to disk.
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()
+
+
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages

    ``packages`` may be a single name or an iterable of names; ``options``
    are extra apt-get flags (defaults to keeping old config files on
    conflict); ``fatal`` makes a failure raise via _run_apt_command.
    """
    opts = options if options is not None else \
        ['--option=Dpkg::Options::=--force-confold']
    cmd = ['apt-get', '--assume-yes'] + list(opts) + ['install']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd += list(packages)
    log("Installing {} with options: {}".format(packages,
                                                opts))
    _run_apt_command(cmd, fatal)
+
+
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages

    ``dist`` selects dist-upgrade instead of upgrade; ``options`` and
    ``fatal`` behave as in apt_install.
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    action = 'dist-upgrade' if dist else 'upgrade'
    cmd = ['apt-get', '--assume-yes'] + list(options) + [action]
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)
+
+
def apt_update(fatal=False):
    """Update local apt cache"""
    _run_apt_command(['apt-get', 'update'], fatal)
+
+
def apt_purge(packages, fatal=False):
    """Purge one or more packages"""
    cmd = ['apt-get', '--assume-yes', 'purge']
    # Accept either a single package name or an iterable of names.
    cmd += [packages] if isinstance(packages, six.string_types) \
        else list(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)
+
+
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark"""
    log("Marking {} as {}".format(packages, mark))
    cmd = ['apt-mark', mark]
    cmd += [packages] if isinstance(packages, six.string_types) \
        else list(packages)

    # fatal selects the raising variant of subprocess execution.
    runner = subprocess.check_call if fatal else subprocess.call
    runner(cmd, universal_newlines=True)
+
+
def apt_hold(packages, fatal=False):
    """Pin one or more packages at their current version via 'apt-mark hold'."""
    return apt_mark(packages, 'hold', fatal=fatal)
+
+
def apt_unhold(packages, fatal=False):
    """Release a hold placed by apt_hold via 'apt-mark unhold'."""
    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
            source.startswith('http') or
            source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        # These forms are all understood natively by add-apt-repository.
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        # Ubuntu Cloud Archive pocket: needs the UCA keyring and a
        # dedicated sources.list.d entry.
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        # Enable the -proposed pocket for the running release.
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        pass
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armoured key material: feed it to apt-key on stdin.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        elif 'http://' in key:
            # NOTE(review): only plain http:// URLs are recognised here;
            # an https:// key URL would fall through to the keyserver branch.
            with NamedTemporaryFile('w+') as key_file:
                subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
                subprocess.check_call(['apt-key', 'add', key_file.name])
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
+
+
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
    The fragment needs to be included as a string. Sources and their
    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    # A single bare string counts as a one-element list.
    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        # No keys configured: every source is added unsigned.
        keys = [None] * len(sources)
    elif isinstance(keys, six.string_types):
        keys = [keys]

    if len(sources) != len(keys):
        raise SourceConfigError(
            'Install sources and keys lists are different lengths')
    for source, key in zip(sources, keys):
        add_source(source, key)
    if update:
        apt_update(fatal=True)
+
+
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted.  If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    installed_to = None
    for handler in plugins():
        # can_handle() returns an explanatory string (not True) when the
        # handler cannot process this source — skip those.
        if handler.can_handle(source) is not True:
            continue
        try:
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource as e:
            log('Install source attempt unsuccessful: {}'.format(e),
                level='WARNING')
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
+
+
def install_from_config(config_var_name):
    """Install the remote source whose URL is stored in the named charm
    config option; returns the extraction directory."""
    return install_remote(config()[config_var_name])
+
+
def plugins(fetch_handlers=None):
    """Instantiate the configured fetch handlers.

    Each entry in ``fetch_handlers`` (defaults to FETCH_HANDLERS) is a
    dotted "package.ClassName" path; handlers whose import raises
    NotImplementedError are skipped with a log message.
    """
    if not fetch_handlers:
        fetch_handlers = FETCH_HANDLERS
    plugin_list = []
    for handler_name in fetch_handlers:
        package, classname = handler_name.rsplit('.', 1)
        try:
            module = importlib.import_module(package)
            plugin_list.append(getattr(module, classname)())
        except NotImplementedError:
            # Skip missing plugins so that they can be omitted from
            # installation if desired
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list
+
+
def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking output and retrying if the fatal flag is set
    to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    :raises: subprocess.CalledProcessError: when ``fatal`` is True and the
        command fails for any reason other than the APT lock being held
        (or the lock retries are exhausted).
    """
    env = os.environ.copy()

    if 'DEBIAN_FRONTEND' not in env:
        # Keep apt from prompting on stdin during unattended hook runs.
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                if e.returncode != APT_NO_LOCK:
                    # Fixed: a fatal command failing for any reason other than
                    # lock contention was previously swallowed silently (the
                    # loop just exited). Propagate the failure instead.
                    raise
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)

    else:
        subprocess.call(cmd, env=env)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py
new file mode 100644
index 0000000..b8e0943
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py
@@ -0,0 +1,167 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import hashlib
+import re
+
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource
+)
+from charmhelpers.payload.archive import (
+ get_archive_handler,
+ extract,
+)
+from charmhelpers.core.host import mkdir, check_hash
+
+import six
+if six.PY3:
+ from urllib.request import (
+ build_opener, install_opener, urlopen, urlretrieve,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ )
+ from urllib.parse import urlparse, urlunparse, parse_qs
+ from urllib.error import URLError
+else:
+ from urllib import urlretrieve
+ from urllib2 import (
+ build_opener, install_opener, urlopen,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ URLError
+ )
+ from urlparse import urlparse, urlunparse, parse_qs
+
+
+def splituser(host):
+ '''urllib.splituser(), but six's support of this seems broken'''
+ _userprog = re.compile('^(.*)@(.*)$')
+ match = _userprog.match(host)
+ if match:
+ return match.group(1, 2)
+ return None, host
+
+
+def splitpasswd(user):
+ '''urllib.splitpasswd(), but six's support of this is missing'''
+ _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+ match = _passwdprog.match(user)
+ if match:
+ return match.group(1, 2)
+ return user, None
+
+
+class ArchiveUrlFetchHandler(BaseFetchHandler):
+ """
+ Handler to download archive files from arbitrary URLs.
+
+ Can fetch from http, https, ftp, and file URLs.
+
+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+
+ Installs the contents of the archive in $CHARM_DIR/fetched/.
+ """
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+ # XXX: Why is this returning a boolean and a string? It's
+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
+ return "Wrong source type"
+ if get_archive_handler(self.base_url(source)):
+ return True
+ return False
+
+ def download(self, source, dest):
+ """
+ Download an archive file.
+
+ :param str source: URL pointing to an archive file.
+ :param str dest: Local path location to download archive file to.
+ """
+        # propagate all exceptions
+ # URLError, OSError, etc
+ proto, netloc, path, params, query, fragment = urlparse(source)
+ if proto in ('http', 'https'):
+ auth, barehost = splituser(netloc)
+ if auth is not None:
+ source = urlunparse((proto, barehost, path, params, query, fragment))
+ username, password = splitpasswd(auth)
+ passman = HTTPPasswordMgrWithDefaultRealm()
+ # Realm is set to None in add_password to force the username and password
+ # to be used whatever the realm
+ passman.add_password(None, source, username, password)
+ authhandler = HTTPBasicAuthHandler(passman)
+ opener = build_opener(authhandler)
+ install_opener(opener)
+ response = urlopen(source)
+ try:
+ with open(dest, 'wb') as dest_file:
+ dest_file.write(response.read())
+ except Exception as e:
+ if os.path.isfile(dest):
+ os.unlink(dest)
+ raise e
+
+    # Mandatory file validation via SHA1 or MD5 hashing.
+ def download_and_validate(self, url, hashsum, validate="sha1"):
+ tempfile, headers = urlretrieve(url)
+ check_hash(tempfile, hashsum, validate)
+ return tempfile
+
+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+ """
+ Download and install an archive file, with optional checksum validation.
+
+ The checksum can also be given on the `source` URL's fragment.
+ For example::
+
+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+ :param str source: URL pointing to an archive file.
+ :param str dest: Local destination path to install to. If not given,
+ installs to `$CHARM_DIR/archives/archive_file_name`.
+ :param str checksum: If given, validate the archive file after download.
+ :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+ such as md5, sha1, sha256, sha512, etc.
+
+ """
+ url_parts = self.parse_url(source)
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+ if not os.path.exists(dest_dir):
+ mkdir(dest_dir, perms=0o755)
+ dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+ try:
+ self.download(source, dld_file)
+ except URLError as e:
+ raise UnhandledSource(e.reason)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ options = parse_qs(url_parts.fragment)
+ for key, value in options.items():
+ if not six.PY3:
+ algorithms = hashlib.algorithms
+ else:
+ algorithms = hashlib.algorithms_available
+ if key in algorithms:
+ if len(value) != 1:
+ raise TypeError(
+ "Expected 1 hash value, not %d" % len(value))
+ expected = value[0]
+ check_hash(dld_file, expected, key)
+ if checksum:
+ check_hash(dld_file, checksum, hash_type)
+ return extract(dld_file, dest)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 0000000..cafd27f
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,68 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from subprocess import check_call
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource,
+ filter_installed_packages,
+ apt_install,
+)
+from charmhelpers.core.host import mkdir
+
+
+if filter_installed_packages(['bzr']) != []:
+ apt_install(['bzr'])
+ if filter_installed_packages(['bzr']) != []:
+ raise NotImplementedError('Unable to install bzr')
+
+
+class BzrUrlFetchHandler(BaseFetchHandler):
+ """Handler for bazaar branches via generic and lp URLs"""
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
+ return False
+ elif not url_parts.scheme:
+ return os.path.exists(os.path.join(source, '.bzr'))
+ else:
+ return True
+
+ def branch(self, source, dest):
+ if not self.can_handle(source):
+ raise UnhandledSource("Cannot handle {}".format(source))
+ if os.path.exists(dest):
+ check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
+ else:
+ check_call(['bzr', 'branch', source, dest])
+
+ def install(self, source, dest=None):
+ url_parts = self.parse_url(source)
+ branch_name = url_parts.path.strip("/").split("/")[-1]
+ if dest:
+ dest_dir = os.path.join(dest, branch_name)
+ else:
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+ branch_name)
+
+ if not os.path.exists(dest_dir):
+ mkdir(dest_dir, perms=0o755)
+ try:
+ self.branch(source, dest_dir)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ return dest_dir
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py
new file mode 100644
index 0000000..65ed531
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py
@@ -0,0 +1,70 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from subprocess import check_call, CalledProcessError
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource,
+ filter_installed_packages,
+ apt_install,
+)
+
+if filter_installed_packages(['git']) != []:
+ apt_install(['git'])
+ if filter_installed_packages(['git']) != []:
+ raise NotImplementedError('Unable to install git')
+
+
+class GitUrlFetchHandler(BaseFetchHandler):
+ """Handler for git branches via generic and github URLs"""
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ # TODO (mattyw) no support for ssh git@ yet
+ if url_parts.scheme not in ('http', 'https', 'git', ''):
+ return False
+ elif not url_parts.scheme:
+ return os.path.exists(os.path.join(source, '.git'))
+ else:
+ return True
+
+ def clone(self, source, dest, branch="master", depth=None):
+ if not self.can_handle(source):
+ raise UnhandledSource("Cannot handle {}".format(source))
+
+ if os.path.exists(dest):
+ cmd = ['git', '-C', dest, 'pull', source, branch]
+ else:
+ cmd = ['git', 'clone', source, dest, '--branch', branch]
+ if depth:
+ cmd.extend(['--depth', depth])
+ check_call(cmd)
+
+ def install(self, source, branch="master", dest=None, depth=None):
+ url_parts = self.parse_url(source)
+ branch_name = url_parts.path.strip("/").split("/")[-1]
+ if dest:
+ dest_dir = os.path.join(dest, branch_name)
+ else:
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+ branch_name)
+ try:
+ self.clone(source, dest_dir, branch, depth)
+ except CalledProcessError as e:
+ raise UnhandledSource(e)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ return dest_dir
diff --git a/charms/trusty/cassandra/hooks/cluster-relation-changed b/charms/trusty/cassandra/hooks/cluster-relation-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/cluster-relation-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/cluster-relation-departed b/charms/trusty/cassandra/hooks/cluster-relation-departed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/cluster-relation-departed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/config-changed b/charms/trusty/cassandra/hooks/config-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/config-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/coordinator.py b/charms/trusty/cassandra/hooks/coordinator.py
new file mode 100644
index 0000000..c353671
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/coordinator.py
@@ -0,0 +1,35 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from charmhelpers.coordinator import BaseCoordinator
+
+
+class CassandraCoordinator(BaseCoordinator):
+ def default_grant(self, lock, unit, granted, queue):
+ '''Grant locks to only one unit at a time, regardless of its name.
+
+ This lets us keep separate locks like repair and restart,
+ while ensuring the operations do not occur on different nodes
+ at the same time.
+ '''
+ # Return True if this unit has already been granted a lock.
+ if self.grants.get(unit):
+ return True
+
+ # Otherwise, return True if the unit is first in the queue.
+ return queue[0] == unit and not granted
+
+
+coordinator = CassandraCoordinator()
diff --git a/charms/trusty/cassandra/hooks/data-relation-changed b/charms/trusty/cassandra/hooks/data-relation-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/data-relation-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/data-relation-departed b/charms/trusty/cassandra/hooks/data-relation-departed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/data-relation-departed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/database-admin-relation-changed b/charms/trusty/cassandra/hooks/database-admin-relation-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/database-admin-relation-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/database-relation-changed b/charms/trusty/cassandra/hooks/database-relation-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/database-relation-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import hooks
+if __name__ == '__main__':
+ hooks.bootstrap()
+ hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/definitions.py b/charms/trusty/cassandra/hooks/definitions.py
new file mode 100644
index 0000000..24f9497
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/definitions.py
@@ -0,0 +1,127 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import services
+
+import actions
+import helpers
+import relations
+
+
+def get_service_definitions():
+ # This looks like it could be a module level global list, but
+ # unfortunately that makes the module unimportable outside of a
+ # hook context. The main culprit is RelationContext, which invokes
+ # relation-get from its constructor. By wrapping the service
+ # definition list in this function, we can defer constructing it
+    # until we have constructed enough of a mock context and can
+    # perform basic tests.
+ config = hookenv.config()
+
+ return [
+ # Prepare for the Cassandra service.
+ dict(service='install',
+ data_ready=[actions.set_proxy,
+ actions.preinstall,
+ actions.emit_meminfo,
+ actions.revert_unchangeable_config,
+ actions.store_unit_private_ip,
+ actions.add_implicit_package_signing_keys,
+ actions.configure_sources,
+ actions.swapoff,
+ actions.reset_sysctl,
+ actions.reset_limits,
+ actions.install_oracle_jre,
+ actions.install_cassandra_packages,
+ actions.emit_java_version,
+ actions.ensure_cassandra_package_status],
+ start=[], stop=[]),
+
+ # Get Cassandra running.
+ dict(service=helpers.get_cassandra_service(),
+
+ # Open access to client and replication ports. Client
+ # protocols require password authentication. Access to
+ # the unauthenticated replication ports is protected via
+ # ufw firewall rules. We do not open the JMX port, although
+ # we could since it is similarly protected by ufw.
+ ports=[config['rpc_port'], # Thrift clients
+ config['native_transport_port'], # Native clients.
+ config['storage_port'], # Plaintext replication
+ config['ssl_storage_port']], # Encrypted replication.
+
+ required_data=[relations.StorageRelation(),
+ relations.PeerRelation()],
+ provided_data=[relations.StorageRelation()],
+ data_ready=[actions.configure_firewall,
+ actions.update_etc_hosts,
+ actions.maintain_seeds,
+ actions.configure_cassandra_yaml,
+ actions.configure_cassandra_env,
+ actions.configure_cassandra_rackdc,
+ actions.reset_all_io_schedulers,
+ actions.maybe_restart,
+ actions.request_unit_superuser,
+ actions.reset_default_password],
+ start=[services.open_ports],
+ stop=[actions.stop_cassandra, services.close_ports]),
+
+ # Actions that must be done while Cassandra is running.
+ dict(service='post',
+ required_data=[RequiresLiveNode()],
+ data_ready=[actions.post_bootstrap,
+ actions.create_unit_superusers,
+ actions.reset_auth_keyspace_replication,
+ actions.publish_database_relations,
+ actions.publish_database_admin_relations,
+ actions.install_maintenance_crontab,
+ actions.nrpe_external_master_relation,
+ actions.emit_cluster_info,
+ actions.set_active],
+ start=[], stop=[])]
+
+
+class RequiresLiveNode:
+ def __bool__(self):
+ is_live = self.is_live()
+ hookenv.log('Requirement RequiresLiveNode: {}'.format(is_live),
+ hookenv.DEBUG)
+ return is_live
+
+ def is_live(self):
+ if helpers.is_decommissioned():
+ hookenv.log('Node is decommissioned')
+ return False
+
+ if helpers.is_cassandra_running():
+ hookenv.log('Cassandra is running')
+ auth = hookenv.config()['authenticator']
+ if auth == 'AllowAllAuthenticator':
+ return True
+ elif hookenv.local_unit() in helpers.get_unit_superusers():
+ hookenv.log('Credentials created')
+ return True
+ else:
+ hookenv.log('Credentials have not been created')
+ return False
+ else:
+ hookenv.log('Cassandra is not running')
+ return False
+
+
+def get_service_manager():
+ return services.ServiceManager(get_service_definitions())
diff --git a/charms/trusty/cassandra/hooks/helpers.py b/charms/trusty/cassandra/hooks/helpers.py
new file mode 100644
index 0000000..b86a6b1
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/helpers.py
@@ -0,0 +1,1084 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import configparser
+from contextlib import contextmanager
+from datetime import timedelta
+from distutils.version import LooseVersion
+import errno
+from functools import wraps
+import io
+import json
+import os.path
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+from textwrap import dedent
+import time
+
+import bcrypt
+from cassandra import ConsistencyLevel
+import cassandra.auth
+import cassandra.cluster
+import cassandra.query
+import yaml
+
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
+from charmhelpers import fetch
+
+from coordinator import coordinator
+
+
+RESTART_TIMEOUT = 600
+
+
+def logged(func):
+ @wraps(func)
+ def wrapper(*args, **kw):
+ hookenv.log("* Helper {}/{}".format(hookenv.hook_name(),
+ func.__name__))
+ return func(*args, **kw)
+ return wrapper
+
+
+def backoff(what_for, max_pause=60):
+ i = 0
+ while True:
+ yield True
+ i += 1
+ pause = min(max_pause, 2 ** i)
+ time.sleep(pause)
+ if pause > 10:
+ hookenv.log('Recheck {} for {}'.format(i, what_for))
+
+
+# FOR CHARMHELPERS
+@contextmanager
+def autostart_disabled(services=None, _policy_rc='/usr/sbin/policy-rc.d'):
+ '''Tell well behaved Debian packages to not start services when installed.
+ '''
+ script = ['#!/bin/sh']
+ if services is not None:
+ for service in services:
+ script.append(
+ 'if [ "$1" = "{}" ]; then exit 101; fi'.format(service))
+ script.append('exit 0')
+ else:
+ script.append('exit 101') # By default, all daemons disabled.
+ try:
+ if os.path.exists(_policy_rc):
+ shutil.move(_policy_rc, "{}-orig".format(_policy_rc))
+ host.write_file(_policy_rc, '\n'.join(script).encode('ASCII'),
+ perms=0o555)
+ yield
+ finally:
+ os.unlink(_policy_rc)
+ if os.path.exists("{}-orig".format(_policy_rc)):
+ shutil.move("{}-orig".format(_policy_rc), _policy_rc)
+
+
+# FOR CHARMHELPERS
+@logged
+def install_packages(packages):
+ packages = list(packages)
+ if hookenv.config('extra_packages'):
+ packages.extend(hookenv.config('extra_packages').split())
+ packages = fetch.filter_installed_packages(packages)
+ if packages:
+ # The DSE packages are huge, so this might take some time.
+ status_set('maintenance', 'Installing packages')
+ with autostart_disabled(['cassandra']):
+ fetch.apt_install(packages, fatal=True)
+
+
+# FOR CHARMHELPERS
+@logged
+def ensure_package_status(packages):
+ config_dict = hookenv.config()
+
+ package_status = config_dict['package_status']
+
+ if package_status not in ['install', 'hold']:
+ raise RuntimeError("package_status must be 'install' or 'hold', "
+ "not {!r}".format(package_status))
+
+ selections = []
+ for package in packages:
+ selections.append('{} {}\n'.format(package, package_status))
+ dpkg = subprocess.Popen(['dpkg', '--set-selections'],
+ stdin=subprocess.PIPE)
+ dpkg.communicate(input=''.join(selections).encode('US-ASCII'))
+
+
+def get_seed_ips():
+ '''Return the set of seed ip addresses.
+
+ We use ip addresses rather than unit names, as we may need to use
+ external seed ips at some point.
+ '''
+ return set((hookenv.leader_get('seeds') or '').split(','))
+
+
+def actual_seed_ips():
+ '''Return the seeds currently in cassandra.yaml'''
+ cassandra_yaml = read_cassandra_yaml()
+ s = cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds']
+ return set(s.split(','))
+
+
+def get_database_directory(config_path):
+ '''Convert a database path from the service config to an absolute path.
+
+ Entries in the config file may be absolute, relative to
+ /var/lib/cassandra, or relative to the mountpoint.
+ '''
+ import relations
+ storage = relations.StorageRelation()
+ if storage.mountpoint:
+ root = os.path.join(storage.mountpoint, 'cassandra')
+ else:
+ root = '/var/lib/cassandra'
+ return os.path.join(root, config_path)
+
+
+def ensure_database_directory(config_path):
+ '''Create the database directory if it doesn't exist, resetting
+ ownership and other settings while we are at it.
+
+ Returns the absolute path.
+ '''
+ absdir = get_database_directory(config_path)
+
+ # Work around Bug #1427150 by ensuring components of the path are
+ # created with the required permissions, if necessary.
+ component = os.sep
+ for p in absdir.split(os.sep)[1:-1]:
+ component = os.path.join(component, p)
+ if not os.path.exists(p):
+ host.mkdir(component)
+ assert component == os.path.split(absdir)[0]
+ host.mkdir(absdir, owner='cassandra', group='cassandra', perms=0o750)
+ return absdir
+
+
+def get_all_database_directories():
+ config = hookenv.config()
+ dirs = dict(
+ data_file_directories=[get_database_directory(d)
+ for d in (config['data_file_directories'] or
+ 'data').split()],
+ commitlog_directory=get_database_directory(
+ config['commitlog_directory'] or 'commitlog'),
+ saved_caches_directory=get_database_directory(
+ config['saved_caches_directory'] or 'saved_caches'))
+ if has_cassandra_version('3.0'):
+ # Not yet configurable. Make configurable with Juju native storage.
+ dirs['hints_directory'] = get_database_directory('hints')
+ return dirs
+
+
+def mountpoint(path):
+ '''Return the mountpoint that path exists on.'''
+ path = os.path.realpath(path)
+ while path != '/' and not os.path.ismount(path):
+ path = os.path.dirname(path)
+ return path
+
+
+# FOR CHARMHELPERS
+def is_lxc():
+ '''Return True if we are running inside an LXC container.'''
+ with open('/proc/1/cgroup', 'r') as f:
+ return ':/lxc/' in f.readline()
+
+
+# FOR CHARMHELPERS
+def set_io_scheduler(io_scheduler, directory):
+ '''Set the block device io scheduler.'''
+
+ assert os.path.isdir(directory)
+
+ # The block device regex may be a tad simplistic.
+ block_regex = re.compile('\/dev\/([a-z]*)', re.IGNORECASE)
+
+ output = subprocess.check_output(['df', directory],
+ universal_newlines=True)
+
+ if not is_lxc():
+ hookenv.log("Setting block device of {} to IO scheduler {}"
+ "".format(directory, io_scheduler))
+ try:
+ block_dev = re.findall(block_regex, output)[0]
+ except IndexError:
+ hookenv.log("Unable to locate block device of {} (in container?)"
+ "".format(directory))
+ return
+ sys_file = os.path.join("/", "sys", "block", block_dev,
+ "queue", "scheduler")
+ try:
+ host.write_file(sys_file, io_scheduler.encode('ascii'),
+ perms=0o644)
+ except OSError as e:
+ if e.errno == errno.EACCES:
+ hookenv.log("Got Permission Denied trying to set the "
+ "IO scheduler at {}. We may be in an LXC. "
+ "Exiting gracefully".format(sys_file),
+ WARNING)
+ elif e.errno == errno.ENOENT:
+ hookenv.log("Got no such file or directory trying to "
+ "set the IO scheduler at {}. It may be "
+ "this is an LXC, the device name is as "
+ "yet unknown to the charm, or LVM/RAID is "
+ "hiding the underlying device name. "
+ "Exiting gracefully".format(sys_file),
+ WARNING)
+ else:
+ raise e
+ else:
+ # Make no change if we are in an LXC
+ hookenv.log("In an LXC. Cannot set io scheduler {}"
+ "".format(io_scheduler))
+
+
# FOR CHARMHELPERS
def recursive_chown(directory, owner="root", group="root"):
    '''Recursively reset ownership of 'directory' and everything below it.

    The top-level 'directory' itself is chowned too.
    '''
    shutil.chown(directory, owner, group)
    for parent, subdirs, subfiles in os.walk(directory):
        for name in subdirs:
            shutil.chown(os.path.join(parent, name), owner, group)
        for name in subfiles:
            shutil.chown(os.path.join(parent, name), owner, group)
+
+
def maybe_backup(path):
    '''Copy a file to file.orig, if file.orig does not already exist.

    The backup is written root-only (0o600) since it may contain
    secrets. An existing backup is never overwritten.
    '''
    backup_path = path + '.orig'
    if not os.path.exists(backup_path):
        with open(path, 'rb') as f:
            host.write_file(backup_path, f.read(), perms=0o600)
+
+
# FOR CHARMHELPERS
def get_package_version(package):
    '''Return the installed version string of 'package', or None.

    None means the package is unknown to apt, or known but not
    currently installed.
    '''
    cache = fetch.apt_cache()
    if package not in cache:
        return None
    pkgver = cache[package].current_ver
    if pkgver is not None:
        return pkgver.ver_str
    return None
+
+
def get_jre():
    '''Return the JRE flavour to use: 'openjdk' or 'oracle'.

    Invalid values of the 'jre' config option fall back to OpenJDK
    with an error logged.
    '''
    # DataStax Enterprise requires the Oracle JRE.
    if get_cassandra_edition() == 'dse':
        return 'oracle'

    config = hookenv.config()
    jre = config['jre'].lower()
    if jre not in ('openjdk', 'oracle'):
        hookenv.log('Unknown JRE {!r} specified. Using OpenJDK'.format(jre),
                    ERROR)
        jre = 'openjdk'
    return jre
+
+
def get_cassandra_edition():
    '''Return the configured edition: 'community' or 'dse'.

    Invalid values of the 'edition' config option fall back to
    'community' with an error logged.
    '''
    config = hookenv.config()
    edition = config['edition'].lower()
    if edition not in ('community', 'dse'):
        hookenv.log('Unknown edition {!r}. Using community.'.format(edition),
                    ERROR)
        edition = 'community'
    return edition
+
+
def get_cassandra_service():
    '''Cassandra upstart service'''
    return 'dse' if get_cassandra_edition() == 'dse' else 'cassandra'
+
+
def get_cassandra_version():
    '''Return the installed Cassandra version string, or None.

    For DSE, the bundled Cassandra version is inferred from the
    'dse-full' package version (DSE 5.0 bundles 3.0, 4.7+ bundles 2.1,
    older bundles 2.0).
    '''
    if get_cassandra_edition() == 'dse':
        dse_ver = get_package_version('dse-full')
        if not dse_ver:
            return None
        elif LooseVersion(dse_ver) >= LooseVersion('5.0'):
            return '3.0'
        elif LooseVersion(dse_ver) >= LooseVersion('4.7'):
            return '2.1'
        else:
            return '2.0'
    return get_package_version('cassandra')
+
+
def has_cassandra_version(minimum_ver):
    '''True if the installed Cassandra is at least 'minimum_ver'.

    NOTE(review): the assert is stripped under 'python -O'; callers are
    expected to invoke this only after package installation.
    '''
    cassandra_version = get_cassandra_version()
    assert cassandra_version is not None, 'Cassandra package not yet installed'
    return LooseVersion(cassandra_version) >= LooseVersion(minimum_ver)
+
+
def get_cassandra_config_dir():
    '''Return the configuration directory for the active edition.'''
    return ('/etc/dse/cassandra' if get_cassandra_edition() == 'dse'
            else '/etc/cassandra')
+
+
def get_cassandra_yaml_file():
    '''Path to cassandra.yaml for the active edition.'''
    config_dir = get_cassandra_config_dir()
    return os.path.join(config_dir, "cassandra.yaml")
+
+
def get_cassandra_env_file():
    '''Path to cassandra-env.sh for the active edition.'''
    config_dir = get_cassandra_config_dir()
    return os.path.join(config_dir, "cassandra-env.sh")
+
+
def get_cassandra_rackdc_file():
    '''Path to cassandra-rackdc.properties for the active edition.'''
    config_dir = get_cassandra_config_dir()
    return os.path.join(config_dir, "cassandra-rackdc.properties")
+
+
def get_cassandra_pid_file():
    '''Path of the pid file written by the Cassandra service.'''
    if get_cassandra_edition() == 'dse':
        return "/var/run/dse/dse.pid"
    return "/var/run/cassandra/cassandra.pid"
+
+
def get_cassandra_packages():
    '''Return the set of packages needed for the configured deployment.

    Includes the database itself, clock sync (ntp), run-one and netcat
    helpers, and a JRE when one can be packaged.
    '''
    edition = get_cassandra_edition()
    if edition == 'dse':
        packages = set(['dse-full'])
    else:
        packages = set(['cassandra'])  # 'cassandra-tools'

    packages.add('ntp')
    packages.add('run-one')
    packages.add('netcat')

    jre = get_jre()
    if jre == 'oracle':
        # We can't use a packaged version of the Oracle JRE, as we
        # are not allowed to bypass Oracle's click through license
        # agreement.
        pass
    else:
        # NB. OpenJDK 8 not available in trusty. This needs to come
        # from a PPA or some other configured source.
        packages.add('openjdk-8-jre-headless')

    return packages
+
+
@logged
def stop_cassandra():
    '''Stop the Cassandra service.

    If it is still running afterwards the unit is marked blocked and
    the hook exits cleanly (exit code 0), leaving the status for the
    operator.
    '''
    if is_cassandra_running():
        hookenv.log('Shutting down Cassandra')
        host.service_stop(get_cassandra_service())
    if is_cassandra_running():
        hookenv.status_set('blocked', 'Cassandra failed to shut down')
        raise SystemExit(0)
+
+
@logged
def start_cassandra():
    '''Start the Cassandra service and wait for it to come up.

    No-op when already running. Blocks the unit and exits the hook if
    startup does not complete within RESTART_TIMEOUT seconds.
    '''
    if is_cassandra_running():
        return

    actual_seeds = sorted(actual_seed_ips())
    assert actual_seeds, 'Attempting to start cassandra with empty seed list'
    # Persist what we are starting with for later comparison.
    hookenv.config()['configured_seeds'] = actual_seeds

    if is_bootstrapped():
        status_set('maintenance',
                   'Starting Cassandra with seeds {!r}'
                   .format(','.join(actual_seeds)))
    else:
        status_set('maintenance',
                   'Bootstrapping with seeds {}'
                   .format(','.join(actual_seeds)))

    host.service_start(get_cassandra_service())

    # Wait for Cassandra to actually start, or abort.
    timeout = time.time() + RESTART_TIMEOUT
    while time.time() < timeout:
        if is_cassandra_running():
            return
        time.sleep(1)
    status_set('blocked', 'Cassandra failed to start')
    raise SystemExit(0)
+
+
@logged
def reconfigure_and_restart_cassandra(overrides=None):
    '''Stop Cassandra, rewrite cassandra.yaml, and start it again.

    'overrides' maps cassandra.yaml keys to values applied on top of
    the charm configuration.
    '''
    # None sentinel instead of a shared mutable {} default argument.
    if overrides is None:
        overrides = {}
    stop_cassandra()
    configure_cassandra_yaml(overrides)
    start_cassandra()
+
+
@logged
def remount_cassandra():
    '''If a new mountpoint is ready, migrate data across to it.'''
    assert not is_cassandra_running()  # Guard against data loss.
    import relations
    storage = relations.StorageRelation()
    if storage.needs_remount():
        status_set('maintenance', 'Migrating data to new mountpoint')
        # Force a (re)bootstrap once the data has moved.
        hookenv.config()['bootstrapped_into_cluster'] = False
        if storage.mountpoint is None:
            hookenv.log('External storage AND DATA gone. '
                        'Reverting to local storage. '
                        'In danger of resurrecting old data. ',
                        WARNING)
        else:
            storage.migrate('/var/lib/cassandra', 'cassandra')
            root = os.path.join(storage.mountpoint, 'cassandra')
            # Keep the data tree private to the cassandra user/group.
            os.chmod(root, 0o750)
+
+
@logged
def ensure_database_directories():
    '''Ensure that directories Cassandra expects to store its data in exist.'''
    # Guard against changing perms on a running db. Although probably
    # harmless, it causes shutil.chown() to fail.
    assert not is_cassandra_running()
    db_dirs = get_all_database_directories()
    ensure_database_directory(db_dirs['commitlog_directory'])
    ensure_database_directory(db_dirs['saved_caches_directory'])
    # hints_directory only exists on newer Cassandra releases.
    if 'hints_directory' in db_dirs:
        ensure_database_directory(db_dirs['hints_directory'])
    for db_dir in db_dirs['data_file_directories']:
        ensure_database_directory(db_dir)
+
+
+CONNECT_TIMEOUT = 10
+
+
@contextmanager
def connect(username=None, password=None, timeout=CONNECT_TIMEOUT,
            auth_timeout=CONNECT_TIMEOUT):
    '''Context manager yielding an authenticated Cassandra session.

    Credentials default to the unit superuser. The initial connection
    is retried for up to 'timeout' seconds; authentication failures
    are given up on after 'auth_timeout' seconds, since they usually
    mean the system_auth data is inconsistent rather than transient.
    The cluster connection is shut down on exit.
    '''
    # We pull the currently configured listen address and port from the
    # yaml, rather than the service configuration, as it may have been
    # overridden.
    cassandra_yaml = read_cassandra_yaml()
    address = cassandra_yaml['rpc_address']
    if address == '0.0.0.0':
        address = 'localhost'
    port = cassandra_yaml['native_transport_port']

    if username is None or password is None:
        username, password = superuser_credentials()

    auth = hookenv.config()['authenticator']
    if auth == 'AllowAllAuthenticator':
        auth_provider = None
    else:
        auth_provider = cassandra.auth.PlainTextAuthProvider(username=username,
                                                             password=password)

    # Although we specify a reconnection_policy, it does not apply to
    # the initial connection so we retry in a loop.
    start = time.time()
    until = start + timeout
    auth_until = start + auth_timeout
    while True:
        cluster = cassandra.cluster.Cluster([address], port=port,
                                            auth_provider=auth_provider)
        try:
            session = cluster.connect()
            session.default_timeout = timeout
            break
        except cassandra.cluster.NoHostAvailable as x:
            cluster.shutdown()
            now = time.time()
            # If every node failed auth, reraise one of the
            # AuthenticationFailed exceptions. Unwrapping the exception
            # means call sites don't have to sniff the exception bundle.
            # We don't retry on auth fails; this method should not be
            # called if the system_auth data is inconsistent.
            auth_fails = [af for af in x.errors.values()
                          if isinstance(af, cassandra.AuthenticationFailed)]
            if auth_fails:
                if now > auth_until:
                    raise auth_fails[0]
            if now > until:
                raise
            time.sleep(1)
    try:
        yield session
    finally:
        cluster.shutdown()
+
+
+QUERY_TIMEOUT = 60
+
+
def query(session, statement, consistency_level, args=None):
    '''Execute 'statement', retrying failures for up to QUERY_TIMEOUT s.

    Any exception is swallowed and retried until the deadline passes,
    after which the last exception propagates to the caller.
    '''
    q = cassandra.query.SimpleStatement(statement,
                                        consistency_level=consistency_level)

    until = time.time() + QUERY_TIMEOUT
    for _ in backoff('query to execute'):
        try:
            return session.execute(q, args)
        except Exception:
            if time.time() > until:
                raise
+
+
def encrypt_password(password):
    '''Return a salted bcrypt hash of 'password'.

    NOTE(review): most Python 3 bcrypt implementations require bytes,
    while callers here pass str -- confirm the installed python3-bcrypt
    accepts str, or encode at this boundary.
    '''
    return bcrypt.hashpw(password, bcrypt.gensalt())
+
+
@logged
def ensure_user(session, username, encrypted_password, superuser=False):
    '''Create the DB user if it doesn't already exist & reset the password.

    'encrypted_password' is a pre-computed bcrypt hash (see
    encrypt_password) written directly into the auth tables.
    '''
    auth = hookenv.config()['authenticator']
    if auth == 'AllowAllAuthenticator':
        return  # No authentication means we cannot create users

    if superuser:
        hookenv.log('Creating SUPERUSER {}'.format(username))
    else:
        hookenv.log('Creating user {}'.format(username))
    # Cassandra 2.2+ keeps credentials in system_auth.roles; earlier
    # releases split them across the users and credentials tables.
    if has_cassandra_version('2.2'):
        query(session,
              'INSERT INTO system_auth.roles '
              '(role, can_login, is_superuser, salted_hash) '
              'VALUES (%s, TRUE, %s, %s)',
              ConsistencyLevel.ALL,
              (username, superuser, encrypted_password))
    else:
        query(session,
              'INSERT INTO system_auth.users (name, super) VALUES (%s, %s)',
              ConsistencyLevel.ALL, (username, superuser))
        query(session,
              'INSERT INTO system_auth.credentials (username, salted_hash) '
              'VALUES (%s, %s)',
              ConsistencyLevel.ALL, (username, encrypted_password))
+
+
@logged
def create_unit_superuser_hard():
    '''Create or recreate the unit's superuser account.

    This method is used when there are no known superuser credentials
    to use. We restart the node using the AllowAllAuthenticator and
    insert our credentials directly into the system_auth keyspace.
    '''
    username, password = superuser_credentials()
    pwhash = encrypt_password(password)
    hookenv.log('Creating unit superuser {}'.format(username))

    # Restart cassandra without authentication & listening on localhost.
    reconfigure_and_restart_cassandra(
        dict(authenticator='AllowAllAuthenticator', rpc_address='localhost'))
    for _ in backoff('superuser creation'):
        try:
            with connect() as session:
                ensure_user(session, username, pwhash, superuser=True)
            break
        except Exception as x:
            # Best effort: log to stdout (juju debug-log) and retry.
            print(str(x))

    # Restart Cassandra with regular config.
    nodetool('flush')  # Ensure our backdoor updates are flushed.
    reconfigure_and_restart_cassandra()
+
+
def get_cqlshrc_path():
    '''Location of root's cqlshrc, where superuser credentials live.'''
    return os.path.join(os.path.expanduser('~root'), '.cassandra', 'cqlshrc')
+
+
def superuser_username():
    '''Return the unit-specific superuser name, e.g. 'juju_service_0'.

    Non-word characters in the unit name (like '/') become underscores.
    '''
    return 'juju_{}'.format(re.subn(r'\W', '_', hookenv.local_unit())[0])
+
+
def superuser_credentials():
    '''Return (username, password) to connect to the Cassandra superuser.

    The credentials are persisted in the root user's cqlshrc file,
    making them easily accessible to the command line tools.

    If no valid credentials exist yet, a fresh password is generated
    and the cqlshrc file is (re)written with 0o400 permissions.
    '''
    cqlshrc_path = get_cqlshrc_path()
    cqlshrc = configparser.ConfigParser(interpolation=None)
    cqlshrc.read([cqlshrc_path])

    username = superuser_username()

    try:
        section = cqlshrc['authentication']
        # If there happened to be an existing cqlshrc file, it might
        # contain invalid credentials. Ignore them.
        if section['username'] == username:
            return section['username'], section['password']
    except KeyError:
        hookenv.log('Generating superuser credentials into {}'.format(
            cqlshrc_path))

    config = hookenv.config()

    password = host.pwgen()

    hookenv.log('Generated username {}'.format(username))

    # We set items separately, rather than together, so that we have a
    # defined order for the ConfigParser to preserve and the tests to
    # rely on.
    cqlshrc.setdefault('authentication', {})
    cqlshrc['authentication']['username'] = username
    cqlshrc['authentication']['password'] = password
    cqlshrc.setdefault('connection', {})
    cqlshrc['connection']['hostname'] = hookenv.unit_public_ip()
    # NOTE(review): get_cassandra_version() returns None before the
    # package is installed -- confirm callers only run after install.
    if get_cassandra_version().startswith('2.0'):
        cqlshrc['connection']['port'] = str(config['rpc_port'])
    else:
        cqlshrc['connection']['port'] = str(config['native_transport_port'])

    ini = io.StringIO()
    cqlshrc.write(ini)
    host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700)
    host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'), perms=0o400)

    return username, password
+
+
def emit(*args, **kw):
    '''Print to stdout and flush immediately.

    Just like print, but with plumbing and mocked out in the test suite.
    '''
    print(*args, **kw)
    sys.stdout.flush()
+
+
def nodetool(*cmd, timeout=120):
    '''Run a nodetool command, retrying until 'timeout' seconds elapse.

    Returns the tab-expanded output, which is also emitted for the
    juju logs. Exits the hook (blocked) if Cassandra has died.
    '''
    cmd = ['nodetool'] + [str(i) for i in cmd]
    i = 0
    until = time.time() + timeout
    for _ in backoff('nodetool to work'):
        i += 1
        try:
            if timeout is not None:
                # Shrink the per-call timeout to whatever remains of
                # the overall deadline.
                timeout = max(0, until - time.time())
            raw = subprocess.check_output(cmd, universal_newlines=True,
                                          timeout=timeout,
                                          stderr=subprocess.STDOUT)

            # Work around CASSANDRA-8776.
            if 'status' in cmd and 'Error:' in raw:
                hookenv.log('Error detected but nodetool returned success.',
                            WARNING)
                raise subprocess.CalledProcessError(99, cmd, raw)

            hookenv.log('{} succeeded'.format(' '.join(cmd)), DEBUG)
            out = raw.expandtabs()
            emit(out)
            return out

        except subprocess.CalledProcessError as x:
            if i > 1:
                emit(x.output.expandtabs())  # Expand tabs for juju debug-log.
            if not is_cassandra_running():
                status_set('blocked',
                           'Cassandra has unexpectedly shutdown')
                raise SystemExit(0)
            if time.time() >= until:
                raise
+
+
def num_nodes():
    '''Number of units known to have bootstrapped into the cluster.'''
    return len(get_bootstrapped_ips())
+
+
def read_cassandra_yaml():
    '''Parse and return the current cassandra.yaml as a dict.'''
    cassandra_yaml_path = get_cassandra_yaml_file()
    with open(cassandra_yaml_path, 'rb') as f:
        return yaml.safe_load(f)
+
+
@logged
def write_cassandra_yaml(cassandra_yaml):
    '''Serialise the given dict back out to cassandra.yaml.'''
    cassandra_yaml_path = get_cassandra_yaml_file()
    host.write_file(cassandra_yaml_path,
                    yaml.safe_dump(cassandra_yaml).encode('UTF-8'))
+
+
def configure_cassandra_yaml(overrides=None, seeds=None):
    '''Regenerate cassandra.yaml from the charm configuration.

    'overrides' maps cassandra.yaml keys to values applied last, on
    top of everything derived from config. 'seeds' optionally replaces
    the seed list obtained from get_seed_ips().
    '''
    # None sentinel rather than a shared mutable {} default argument.
    if overrides is None:
        overrides = {}

    cassandra_yaml_path = get_cassandra_yaml_file()
    config = hookenv.config()

    maybe_backup(cassandra_yaml_path)  # Its comments may be useful.

    cassandra_yaml = read_cassandra_yaml()

    # Most options just copy from config.yaml keys with the same name.
    # Using the same name is preferred to match the actual Cassandra
    # documentation.
    simple_config_keys = ['cluster_name', 'num_tokens',
                          'partitioner', 'authorizer', 'authenticator',
                          'compaction_throughput_mb_per_sec',
                          'stream_throughput_outbound_megabits_per_sec',
                          'tombstone_warn_threshold',
                          'tombstone_failure_threshold',
                          'native_transport_port', 'rpc_port',
                          'storage_port', 'ssl_storage_port']
    cassandra_yaml.update((k, config[k]) for k in simple_config_keys)

    seeds = ','.join(seeds or get_seed_ips())  # Don't include whitespace!
    hookenv.log('Configuring seeds as {!r}'.format(seeds), DEBUG)
    cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds'] = seeds

    cassandra_yaml['listen_address'] = hookenv.unit_private_ip()
    cassandra_yaml['rpc_address'] = '0.0.0.0'
    if not get_cassandra_version().startswith('2.0'):
        cassandra_yaml['broadcast_rpc_address'] = hookenv.unit_public_ip()

    dirs = get_all_database_directories()
    cassandra_yaml.update(dirs)

    # GossipingPropertyFileSnitch is the only snitch recommended for
    # production. It we allow others, we need to consider how to deal
    # with the system_auth keyspace replication settings.
    cassandra_yaml['endpoint_snitch'] = 'GossipingPropertyFileSnitch'

    # Per Bug #1523546 and CASSANDRA-9319, Thrift is disabled by default in
    # Cassandra 2.2. Ensure it is enabled if rpc_port is non-zero.
    if int(config['rpc_port']) > 0:
        cassandra_yaml['start_rpc'] = True

    cassandra_yaml.update(overrides)

    write_cassandra_yaml(cassandra_yaml)
+
+
def get_pid_from_file(pid_file):
    '''Return the PID stored in 'pid_file'.

    Raises ValueError if the file does not contain a plausible PID
    (must be an integer greater than 1). FileNotFoundError propagates
    unchanged so callers can distinguish a missing pid file.
    '''
    try:
        with open(pid_file, 'r') as f:
            pid = int(f.read().strip().split()[0])
        if pid <= 1:
            raise ValueError('Illegal pid {}'.format(pid))
        return pid
    except (ValueError, IndexError) as e:
        hookenv.log("Invalid PID in {}.".format(pid_file))
        # Chain the original exception instead of discarding it.
        raise ValueError(e) from e
+
+
def is_cassandra_running():
    '''True once the Cassandra process is up and answering nodetool.

    Returns False as soon as the pid file or process disappears;
    otherwise keeps polling (with backoff) until nodetool responds.
    '''
    pid_file = get_cassandra_pid_file()

    try:
        for _ in backoff('Cassandra to respond'):
            # We reload the pid every time, in case it has gone away.
            # If it goes away, a FileNotFound exception is raised.
            pid = get_pid_from_file(pid_file)

            # This does not kill the process but checks for its
            # existence. It raises an ProcessLookupError if the process
            # is not running.
            os.kill(pid, 0)

            if subprocess.call(["nodetool", "status"],
                               stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL) == 0:
                hookenv.log(
                    "Cassandra PID {} is running and responding".format(pid))
                return True
    except FileNotFoundError:
        hookenv.log("Cassandra is not running. PID file does not exist.")
        return False
    except ProcessLookupError:
        if os.path.exists(pid_file):
            # File disappeared between reading the PID and checking if
            # the PID is running.
            hookenv.log("Cassandra is not running, but pid file exists.",
                        WARNING)
        else:
            hookenv.log("Cassandra is not running. PID file does not exist.")
        return False
+
+
def get_auth_keyspace_replication(session):
    '''Return the system_auth keyspace replication settings as a dict.'''
    if has_cassandra_version('3.0'):
        # 3.0 moved keyspace metadata to system_schema and stores the
        # replication map natively.
        statement = dedent('''\
            SELECT replication FROM system_schema.keyspaces
            WHERE keyspace_name='system_auth'
            ''')
        r = query(session, statement, ConsistencyLevel.QUORUM)
        return dict(r[0][0])
    else:
        # Pre-3.0 stores the strategy options as a JSON string.
        statement = dedent('''\
            SELECT strategy_options FROM system.schema_keyspaces
            WHERE keyspace_name='system_auth'
            ''')
        r = query(session, statement, ConsistencyLevel.QUORUM)
        return json.loads(r[0][0])
+
+
@logged
def set_auth_keyspace_replication(session, settings):
    '''ALTER the system_auth keyspace to use the given replication map.'''
    # Live operation, so keep status the same.
    status_set(hookenv.status_get()[0],
               'Updating system_auth rf to {!r}'.format(settings))
    statement = 'ALTER KEYSPACE system_auth WITH REPLICATION = %s'
    # ALL consistency: every node must acknowledge the auth change.
    query(session, statement, ConsistencyLevel.ALL, (settings,))
+
+
@logged
def repair_auth_keyspace():
    '''Run a full repair of the system_auth keyspace.'''
    # Repair takes a long time, and may need to be retried due to 'snapshot
    # creation' errors, but should certainly complete within an hour since
    # the keyspace is tiny.
    status_set(hookenv.status_get()[0],
               'Repairing system_auth keyspace')
    nodetool('repair', 'system_auth', timeout=3600)
+
+
def is_bootstrapped(unit=None):
    '''Return True if the node has already bootstrapped into the cluster.

    With no 'unit' (or the local unit), local config state is used;
    for peers, the flag is read off the coordinator peer relation.
    '''
    if unit is None or unit == hookenv.local_unit():
        return hookenv.config().get('bootstrapped', False)
    elif coordinator.relid:
        return bool(hookenv.relation_get(rid=coordinator.relid,
                                         unit=unit).get('bootstrapped'))
    else:
        return False
+
+
def set_bootstrapped():
    '''Record that this unit has bootstrapped into the cluster.'''
    # We need to store this flag in two locations. The peer relation,
    # so peers can see it, and local state, for when we haven't joined
    # the peer relation yet. actions.publish_bootstrapped_flag()
    # calls this method again when necessary to ensure that state is
    # propagated if/when the peer relation is joined.
    config = hookenv.config()
    config['bootstrapped'] = True
    if coordinator.relid is not None:
        hookenv.relation_set(coordinator.relid, bootstrapped="1")
    if config.changed('bootstrapped'):
        hookenv.log('Bootstrapped')
    else:
        hookenv.log('Already bootstrapped')
+
+
def get_bootstrapped():
    '''Return the set of unit names known to have bootstrapped.'''
    units = [hookenv.local_unit()]
    if coordinator.relid is not None:
        units.extend(hookenv.related_units(coordinator.relid))
    return set([unit for unit in units if is_bootstrapped(unit)])
+
+
def get_bootstrapped_ips():
    '''Return the set of private IPs of bootstrapped units.'''
    return set([unit_to_ip(unit) for unit in get_bootstrapped()])
+
+
def unit_to_ip(unit):
    '''Map a unit name to its private IP, or None if unknown.'''
    if unit is None or unit == hookenv.local_unit():
        return hookenv.unit_private_ip()
    elif coordinator.relid:
        pa = hookenv.relation_get(rid=coordinator.relid,
                                  unit=unit).get('private-address')
        # NOTE(review): relies on the private hookenv._ensure_ip helper
        # to resolve hostnames to IPs -- may break across charmhelpers
        # releases.
        return hookenv._ensure_ip(pa)
    else:
        return None
+
+
def get_node_status():
    '''Return the Cassandra node status.

    May be NORMAL, JOINING, DECOMMISSIONED etc., or None if we can't tell.
    '''
    if not is_cassandra_running():
        return None
    raw = nodetool('netstats')
    # The mode appears as a 'Mode: <word>' line in netstats output.
    m = re.search(r'(?m)^Mode:\s+(\w+)$', raw)
    if m is None:
        return None
    return m.group(1).upper()
+
+
def is_decommissioned():
    '''True if this node has left, or is leaving, the cluster.'''
    status = get_node_status()
    leaving = status in ('DECOMMISSIONED', 'LEAVING')
    if leaving:
        hookenv.log('This node is {}'.format(status), WARNING)
    return leaving
+
+
@logged
def emit_describe_cluster():
    '''Run nodetool describecluster for the logs.'''
    nodetool('describecluster')  # Implicit emit
+
+
@logged
def emit_status():
    '''Run 'nodetool status' for the logs.'''
    nodetool('status')  # Implicit emit
+
+
@logged
def emit_netstats():
    '''Run 'nodetool netstats' for the logs.'''
    nodetool('netstats')  # Implicit emit
+
+
def emit_cluster_info():
    '''Dump describecluster, status and netstats to the logs.'''
    emit_describe_cluster()
    emit_status()
    emit_netstats()
+
+
# FOR CHARMHELPERS (and think of a better name)
def week_spread(unit_num):
    '''Pick a time for a unit's weekly job.

    Jobs are spread out evenly throughout the week as best we can.
    The chosen time only depends on the unit number, and does not change
    if other units are added and removed; while the chosen time will not
    be perfect, we don't have to worry about skipping a weekly job if
    units are added or removed at the wrong moment.

    Returns (dow, hour, minute) suitable for cron.
    '''
    def van_der_corput(n, base=2):
        '''Van der Corput low-discrepancy sequence value for n.

        Yields 0, 0.5, 0.25, 0.75, 0.125, 0.625, ...
        http://rosettacode.org/wiki/Van_der_Corput_sequence#Python
        '''
        value, denominator = 0, 1
        while n:
            denominator *= base
            n, digit = divmod(n, base)
            value += digit / denominator
        return value

    # Plain modulo for the day of week: easy for humans and what you
    # expect with 7 units or fewer. (The sequence above could spread
    # days too, but unit 1 landing mid-week would surprise people.)
    day_of_week = unit_num % 7

    # Each batch of 7 units shares a time of day, placed as far from
    # the other batches' times as possible via the sequence.
    minutes_in_day = 24 * 60
    offset = timedelta(
        minutes=int(minutes_in_day * van_der_corput(unit_num // 7)))
    hour, remainder = divmod(offset.seconds, 60 * 60)
    minute = remainder // 60
    return (day_of_week, hour, minute)
+
+
# FOR CHARMHELPERS. This should be a constant in nrpe.py
def local_plugins_dir():
    '''Directory where locally-installed Nagios NRPE plugins live.'''
    return os.path.join('/', 'usr', 'local', 'lib', 'nagios', 'plugins')
+
+
def leader_ping():
    '''Make a change in the leader settings, waking the non-leaders.

    Must only be called on the leader unit.
    '''
    assert hookenv.is_leader()
    last = int(hookenv.leader_get('ping') or 0)
    hookenv.leader_set(ping=str(last + 1))
+
+
def get_unit_superusers():
    '''Return the set of units that have had their superuser accounts created.
    '''
    # Stored as a JSON list in the leader settings.
    raw = hookenv.leader_get('superusers')
    return set(json.loads(raw or '[]'))
+
+
def set_unit_superusers(superusers):
    '''Persist the set of units whose superuser accounts exist.'''
    # Sorted for a stable JSON representation.
    hookenv.leader_set(superusers=json.dumps(sorted(superusers)))
+
+
def status_set(state, message):
    '''Set the unit status and log a message.'''
    hookenv.status_set(state, message)
    hookenv.log('{} unit state: {}'.format(state, message))
+
+
def service_status_set(state, message):
    '''Set the service status and log a message.'''
    # hookenv has no service-level helper; call status-set directly.
    subprocess.check_call(['status-set', '--service', state, message])
    hookenv.log('{} service state: {}'.format(state, message))
+
+
def get_service_name(relid):
    '''Return the service name for the other end of relid.'''
    units = hookenv.related_units(relid)
    if not units:
        return None
    # Unit names look like 'service/0'; keep the service part.
    return units[0].split('/', 1)[0]
+
+
def peer_relid():
    '''Return the peer relation id used by the coordinator, or None.'''
    return coordinator.relid
+
+
@logged
def set_active():
    '''Set happy state for this unit and, on the leader, the service.'''
    if hookenv.unit_private_ip() in get_seed_ips():
        msg = 'Live seed'
    else:
        msg = 'Live node'
    status_set('active', msg)

    if hookenv.is_leader():
        n = num_nodes()
        if n == 1:
            # Reads better than '1 node cluster'.
            n = 'Single'
        service_status_set('active', '{} node cluster'.format(n))
+
+
def update_hosts_file(hosts_file, hosts_map):
    """Older versions of Cassandra need own hostname resolution.

    Rewrite 'hosts_file' so every ip -> hostname pair in 'hosts_map'
    is present, removing any pre-existing lines that conflict on
    either the IP or the hostname. Ordering of untouched lines is
    preserved; new entries are appended.
    """
    with open(hosts_file, 'r') as hosts:
        lines = hosts.readlines()

    newlines = []
    for ip, hostname in hosts_map.items():
        if not ip or not hostname:
            continue

        keepers = []
        for line in lines:
            _line = line.split()
            if len(_line) < 2 or not (_line[0] == ip or hostname in _line[1:]):
                keepers.append(line)
            else:
                hookenv.log('Marking line {!r} for update or removal'
                            ''.format(line.strip()), level=DEBUG)

        lines = keepers
        newlines.append('{} {}\n'.format(ip, hostname))

    lines += newlines

    # Write to a temporary file in the same directory as the target so
    # the final os.rename() is an atomic same-filesystem rename. The
    # previous default (/tmp) could make the rename fail with EXDEV
    # when the target lives on a different filesystem.
    hosts_dir = os.path.dirname(os.path.abspath(hosts_file))
    with tempfile.NamedTemporaryFile('w', dir=hosts_dir,
                                     delete=False) as tmpfile:
        tmpfile.writelines(lines)

    os.rename(tmpfile.name, hosts_file)
    os.chmod(hosts_file, 0o644)
diff --git a/charms/trusty/cassandra/hooks/hooks.py b/charms/trusty/cassandra/hooks/hooks.py
new file mode 100644
index 0000000..e5b64ed
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/hooks.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from charmhelpers import fetch
+from charmhelpers.core import hookenv
+
+
def set_proxy():
    '''Export the configured http_proxy for ftp/http/https downloads.

    The single 'http_proxy' config value is applied to all three
    protocol environment variables.
    '''
    import os
    config = hookenv.config()
    if config['http_proxy']:
        os.environ['ftp_proxy'] = config['http_proxy']
        os.environ['http_proxy'] = config['http_proxy']
        os.environ['https_proxy'] = config['http_proxy']
+
+
def bootstrap():
    '''Install the Python dependencies the charm needs, if missing.

    Tries the imports first; on failure installs the packages via apt
    (honouring any configured proxy) and imports again.
    '''
    try:
        import bcrypt  # NOQA: flake8
        import cassandra  # NOQA: flake8
    except ImportError:
        packages = ['python3-bcrypt', 'python3-cassandra']
        set_proxy()
        fetch.configure_sources(update=True)
        fetch.apt_install(packages, fatal=True)
        import bcrypt  # NOQA: flake8
        import cassandra  # NOQA: flake8
+
+
def default_hook():
    '''Dispatch the current hook through the services framework.'''
    if not hookenv.has_juju_version('1.24'):
        hookenv.status_set('blocked', 'Requires Juju 1.24 or higher')
        # Error state, since we don't have 1.24 to give a nice blocked state.
        raise SystemExit(1)

    # These need to be imported after bootstrap() or required Python
    # packages may not have been installed.
    import definitions

    # Only useful for debugging, or perhaps have this enabled with a config
    # option?
    # from loglog import loglog
    # loglog('/var/log/cassandra/system.log', prefix='C*: ')

    hookenv.log('*** {} Hook Start'.format(hookenv.hook_name()))
    sm = definitions.get_service_manager()
    sm.manage()
    hookenv.log('*** {} Hook Done'.format(hookenv.hook_name()))
diff --git a/charms/trusty/cassandra/hooks/install b/charms/trusty/cassandra/hooks/install
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/install
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hooks
if __name__ == '__main__':
    # Install missing Python dependencies, then dispatch this hook
    # through the services framework.
    hooks.bootstrap()
    hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/leader-elected b/charms/trusty/cassandra/hooks/leader-elected
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/leader-elected
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hooks
if __name__ == '__main__':
    # Install missing Python dependencies, then dispatch this hook
    # through the services framework.
    hooks.bootstrap()
    hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/leader-settings-changed b/charms/trusty/cassandra/hooks/leader-settings-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/leader-settings-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hooks
if __name__ == '__main__':
    # Install missing Python dependencies, then dispatch this hook
    # through the services framework.
    hooks.bootstrap()
    hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/loglog.py b/charms/trusty/cassandra/hooks/loglog.py
new file mode 100644
index 0000000..33f3af8
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/loglog.py
@@ -0,0 +1,42 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import atexit
+import subprocess
+import threading
+import time
+
+from charmhelpers.core import hookenv
+
+
def loglog(filename, prefix='', level=hookenv.DEBUG):
    '''Mirror an arbitrary log file to the Juju hook log in the background.

    Spawns 'tail -F' (which survives log rotation) and a daemon thread
    forwarding each line to hookenv.log with 'prefix' prepended. The
    tail process is terminated when the interpreter exits.
    '''
    tailproc = subprocess.Popen(['tail', '-F', filename],
                                stdout=subprocess.PIPE,
                                universal_newlines=True)
    atexit.register(tailproc.terminate)

    def loglog_t(tailproc=tailproc):
        while True:
            line = tailproc.stdout.readline()
            if line:
                hookenv.log('{}{}'.format(prefix, line), level)
            else:
                # Nothing new yet; poll again shortly.
                time.sleep(0.1)
                continue

    # Daemon thread so it never blocks interpreter shutdown.
    t = threading.Thread(target=loglog_t, daemon=True)
    t.start()
diff --git a/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed b/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hooks
if __name__ == '__main__':
    # Install missing Python dependencies, then dispatch this hook
    # through the services framework.
    hooks.bootstrap()
    hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/relations.py b/charms/trusty/cassandra/hooks/relations.py
new file mode 100644
index 0000000..f7870a1
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/relations.py
@@ -0,0 +1,139 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os.path
+
+import yaml
+
+from charmhelpers.core import hookenv, host
+from charmhelpers.core.hookenv import log, WARNING
+from charmhelpers.core.services.helpers import RelationContext
+
+from coordinator import coordinator
+
+
+class PeerRelation(RelationContext):
+ interface = 'cassandra-cluster'
+ name = 'cluster'
+
+ def is_ready(self):
+ # All units except the leader need to wait until the peer
+ # relation is available.
+ if coordinator.relid is not None or hookenv.is_leader():
+ return True
+ return False
+
+
+# FOR CHARMHELPERS (if we can integrate Juju 1.24 storage too)
+class StorageRelation(RelationContext):
+ '''Wait for the block storage mount to become available.
+
+ Charms using this should add a 'wait_for_storage_broker' boolean
+ configuration option in their config.yaml file. This is necessary
+ to avoid potential data loss race conditions, because otherwise a
+ unit will be started up using local disk before it becomes aware
+ that it should be using external storage.
+
+ 'relname' is the relation name.
+
+ 'mountpount' is the mountpoint. Use the default if you have a single
+ block storage broker relation. The default is calculated to avoid
+ configs using the unit name (/srv/${service}_${unitnumber}).
+ '''
+ interface = 'block-storage'
+ mountpoint = None
+
+ def __init__(self, name=None, mountpoint=None):
+ if name is None:
+ name = self._get_relation_name()
+ super(StorageRelation, self).__init__(name)
+
+ if mountpoint is None:
+ mountpoint = os.path.join('/srv/',
+ hookenv.local_unit().replace('/', '_'))
+ self._requested_mountpoint = mountpoint
+
+ if len(self.get('data', [])) == 0:
+ self.mountpoint = None
+ elif mountpoint == self['data'][0].get('mountpoint', None):
+ self.mountpoint = mountpoint
+ else:
+ self.mountpoint = None
+
+ def _get_relation_name(self):
+ with open(os.path.join(hookenv.charm_dir(),
+ 'metadata.yaml'), 'r') as mdf:
+ md = yaml.safe_load(mdf)
+ for section in ['requires', 'provides']:
+ for relname in md.get(section, {}).keys():
+ if md[section][relname]['interface'] == 'block-storage':
+ return relname
+ raise LookupError('No block-storage relation defined')
+
+ def is_ready(self):
+ if hookenv.config('wait_for_storage_broker'):
+ if self.mountpoint:
+ log("External storage mounted at {}".format(self.mountpoint))
+ return True
+ else:
+ log("Waiting for block storage broker to mount {}".format(
+ self._requested_mountpoint), WARNING)
+ return False
+ return True
+
+ def provide_data(self, remote_service, service_ready):
+ hookenv.log('Requesting mountpoint {} from {}'
+ .format(self._requested_mountpoint, remote_service))
+ return dict(mountpoint=self._requested_mountpoint)
+
+ def needs_remount(self):
+ config = hookenv.config()
+ return config.get('live_mountpoint') != self.mountpoint
+
+ def migrate(self, src_dir, subdir):
+ assert self.needs_remount()
+ assert subdir, 'Can only migrate to a subdirectory on a mount'
+
+ config = hookenv.config()
+ config['live_mountpoint'] = self.mountpoint
+
+ if self.mountpoint is None:
+ hookenv.log('External storage AND DATA gone.'
+ 'Reverting to original local storage', WARNING)
+ return
+
+ dst_dir = os.path.join(self.mountpoint, subdir)
+ if os.path.exists(dst_dir):
+ hookenv.log('{} already exists. Not migrating data.'.format(
+ dst_dir))
+ return
+
+ # We are migrating the contents of src_dir, so we want a
+ # trailing slash to ensure rsync's behavior.
+ if not src_dir.endswith('/'):
+ src_dir += '/'
+
+ # We don't migrate data directly into the new destination,
+ # which allows us to detect a failed migration and recover.
+ tmp_dst_dir = dst_dir + '.migrating'
+ hookenv.log('Migrating data from {} to {}'.format(
+ src_dir, tmp_dst_dir))
+ host.rsync(src_dir, tmp_dst_dir, flags='-av')
+
+ hookenv.log('Moving {} to {}'.format(tmp_dst_dir, dst_dir))
+ os.rename(tmp_dst_dir, dst_dir)
+
+ assert not self.needs_remount()
diff --git a/charms/trusty/cassandra/hooks/stop b/charms/trusty/cassandra/hooks/stop
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/stop
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Thin dispatcher: all hook logic lives in hooks.py. bootstrap() prepares
+# the environment, then default_hook() presumably routes on the name this
+# file was invoked as -- see hooks.py for the actual dispatch.
+import hooks
+if __name__ == '__main__':
+    hooks.bootstrap()
+    hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/upgrade-charm b/charms/trusty/cassandra/hooks/upgrade-charm
new file mode 100755
index 0000000..9128cab
--- /dev/null
+++ b/charms/trusty/cassandra/hooks/upgrade-charm
@@ -0,0 +1,20 @@
+#!/usr/bin/python3
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# Thin dispatcher: all hook logic lives in hooks.py. bootstrap() prepares
+# the environment, then default_hook() presumably routes on the name this
+# file was invoked as -- see hooks.py for the actual dispatch.
+import hooks
+if __name__ == '__main__':
+    hooks.bootstrap()
+    hooks.default_hook()
diff --git a/charms/trusty/cassandra/icon.svg b/charms/trusty/cassandra/icon.svg
new file mode 100644
index 0000000..7615021
--- /dev/null
+++ b/charms/trusty/cassandra/icon.svg
@@ -0,0 +1,650 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="96"
+ height="96"
+ id="svg6517"
+ version="1.1"
+ inkscape:version="0.48+devel r12304"
+ sodipodi:docname="cassandra01.svg">
+ <defs
+ id="defs6519">
+ <linearGradient
+ inkscape:collect="always"
+ id="linearGradient1105">
+ <stop
+ style="stop-color:#4ba6bd;stop-opacity:1"
+ offset="0"
+ id="stop1107" />
+ <stop
+ style="stop-color:#94ccda;stop-opacity:1"
+ offset="1"
+ id="stop1109" />
+ </linearGradient>
+ <linearGradient
+ id="Background">
+ <stop
+ id="stop4178"
+ offset="0"
+ style="stop-color:#d3effc;stop-opacity:1" />
+ <stop
+ id="stop4180"
+ offset="1"
+ style="stop-color:#e8f3f8;stop-opacity:1" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Inner Shadow"
+ id="filter1121">
+ <feFlood
+ flood-opacity="0.59999999999999998"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood1123" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="out"
+ result="composite1"
+ id="feComposite1125" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur1127" />
+ <feOffset
+ dx="0"
+ dy="2"
+ result="offset"
+ id="feOffset1129" />
+ <feComposite
+ in="offset"
+ in2="SourceGraphic"
+ operator="atop"
+ result="composite2"
+ id="feComposite1131" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Drop Shadow"
+ id="filter950">
+ <feFlood
+ flood-opacity="0.25"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood952" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="in"
+ result="composite1"
+ id="feComposite954" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur956" />
+ <feOffset
+ dx="0"
+ dy="1"
+ result="offset"
+ id="feOffset958" />
+ <feComposite
+ in="SourceGraphic"
+ in2="offset"
+ operator="over"
+ result="composite2"
+ id="feComposite960" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath873">
+ <g
+ transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
+ id="g875"
+ inkscape:label="Layer 1"
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
+ <path
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
+ d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter891"
+ inkscape:label="Badge Shadow">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.71999962"
+ id="feGaussianBlur893" />
+ </filter>
+ <clipPath
+ id="clipPath2832">
+ <path
+ id="path2834"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2844">
+ <path
+ id="path2846"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2852">
+ <path
+ id="path2854"
+ d="m 96.0078,715.93 88.2902,0 0,-62.176 -88.2902,0 0,62.176 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2868">
+ <path
+ id="path2870"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2880">
+ <path
+ id="path2882"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2908">
+ <path
+ id="path2910"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2936">
+ <path
+ id="path2938"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2944">
+ <path
+ id="path2946"
+ d="m 121.202,708.378 45.899,0 0,-45.859 -45.899,0 0,45.859 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2960">
+ <path
+ id="path2962"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2968">
+ <path
+ id="path2970"
+ d="m 40.4033,726.188 212.4017,0 0,-61.818 -212.4017,0 0,61.818 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2988">
+ <path
+ id="path2990"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath2996">
+ <path
+ id="path2998"
+ d="m 39.5195,688.644 199.3805,0 0,-73.818 -199.3805,0 0,73.818 z" />
+ </clipPath>
+ <clipPath
+ id="clipPath3016">
+ <path
+ id="path3018"
+ d="M 0,792 612,792 612,0 0,0 0,792 z" />
+ </clipPath>
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#linearGradient1105"
+ id="linearGradient1111"
+ x1="-220"
+ y1="731.29077"
+ x2="-220"
+ y2="635.29077"
+ gradientUnits="userSpaceOnUse" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="3.259629"
+ inkscape:cx="51.812246"
+ inkscape:cy="27.005007"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1920"
+ inkscape:window-height="1029"
+ inkscape:window-x="0"
+ inkscape:window-y="24"
+ inkscape:window-maximized="1"
+ showborder="true"
+ showguides="true"
+ inkscape:guide-bbox="true"
+ inkscape:showpageshadow="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid821" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="16,48"
+ id="guide823" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,80"
+ id="guide825" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="80,40"
+ id="guide827" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,16"
+ id="guide829" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata6522">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="BACKGROUND"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(268,-635.29076)"
+ style="display:inline">
+ <path
+ style="fill:url(#linearGradient1111);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
+ d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
+ id="path6455"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ <g
+ id="g1012"
+ transform="matrix(0.31314985,0,0,0.31314985,-263.71323,659.42827)">
+ <g
+ style="display:inline"
+ id="layer2-4"
+ transform="translate(-62.668647,-74.06425)">
+ <g
+ id="g4555"
+ transform="matrix(1.25,0,0,-1.25,19.117647,990)">
+ <g
+ id="g2828">
+ <g
+ id="g2830"
+ clip-path="url(#clipPath2832)">
+ <g
+ id="g2836"
+ transform="translate(210.8784,690.4834)">
+ <path
+ style="fill:#bbe6fb;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2838"
+ d="m 0,0 c 1.584,-18.452 -27.455,-36.014 -64.859,-39.223 -37.404,-3.209 -69.01,9.151 -70.592,27.602 -1.584,18.455 27.455,36.016 64.859,39.225 C -33.188,30.812 -1.582,18.455 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2840">
+ <g
+ id="g2842"
+ clip-path="url(#clipPath2844)">
+ <g
+ id="g2848">
+ <g
+ id="g2850" />
+ <g
+ id="g2856">
+ <g
+ style="opacity:0.35000604"
+ id="g2858"
+ clip-path="url(#clipPath2852)">
+ <g
+ id="g2860"
+ transform="translate(141.3843,715.9233)">
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2862"
+ d="m 0,0 c -14.268,0.232 -30.964,-5.433 -43.387,-10.738 -1.293,-3.726 -1.989,-7.689 -1.989,-11.797 0,-21.888 19.764,-39.634 44.145,-39.634 24.381,0 44.145,17.746 44.145,39.634 0,6.927 -1.984,13.435 -5.463,19.101 C 27.512,-1.889 13.842,-0.225 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2864">
+ <g
+ id="g2866"
+ clip-path="url(#clipPath2868)">
+ <g
+ id="g2872"
+ transform="translate(140.1528,715.9277)">
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2874"
+ d="m 0,0 c -7.899,0.482 -21.514,-3.639 -32.867,-7.75 -1.725,-4.071 -2.683,-8.526 -2.683,-13.201 0,-19.178 17.388,-34.725 35.782,-34.725 18.273,0 34.44,15.572 35.782,34.725 0.436,6.237 -1.711,12.114 -4.692,17.181 C 19.552,-1.697 7.061,-0.431 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2876">
+ <g
+ id="g2878"
+ clip-path="url(#clipPath2880)">
+ <g
+ id="g2884"
+ transform="translate(119.8818,697.4946)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2886"
+ d="M 0,0 C 0.969,2.146 2.437,3.197 3.859,4.996 3.701,5.422 3.355,6.815 3.355,7.298 c 0,2.156 1.749,3.906 3.906,3.906 0.509,0 0.995,-0.101 1.44,-0.278 6.465,4.927 14.976,7.075 23.529,5.163 0.781,-0.176 1.547,-0.389 2.299,-0.623 C 26.076,16.638 16.548,13.644 10.067,8.413 10.265,7.946 10.814,6.611 10.814,6.074 10.814,3.917 9.418,3.392 7.261,3.392 6.771,3.392 6.303,3.486 5.87,3.651 4.406,1.685 2.612,-2.06 1.734,-4.401 c 3.584,-3.206 6.822,-4.368 11.042,-5.945 -0.011,0.201 0.145,0.387 0.145,0.592 0,6.503 5.725,11.788 12.229,11.788 5.828,0 10.654,-4.238 11.596,-9.798 2.908,1.85 5.72,3.268 7.863,6.01 -0.5,0.61 -1.039,2.337 -1.039,3.187 0,1.957 1.588,3.544 3.545,3.544 0.277,0 0.543,-0.04 0.802,-0.1 1.088,2.236 1.909,4.606 2.434,7.05 -10.17,7.529 -29.847,6.502 -29.847,6.502 0,0 -15.658,0.817 -26.258,-4.349 C -5.047,8.969 -3.008,4.11 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2888"
+ transform="translate(168.4907,700.4282)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2890"
+ d="m 0,0 c 0.719,-0.648 1.111,-1.217 1.42,-1.771 0.951,-1.71 -0.957,-3.275 -2.914,-3.275 -0.199,0 -0.391,0.027 -0.582,0.059 -2.205,-3.446 -6.067,-7.865 -9.498,-10.089 5.261,-0.862 10.222,-2.969 14.17,-6.225 2.875,5.151 5.08,12.589 5.08,18.907 0,4.809 -2.123,8.334 -5.328,10.92 C 2.18,5.95 0.805,2.347 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2892"
+ transform="translate(125.7842,667.8032)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2894"
+ d="M 0,0 C 1.753,4.841 6.065,8.592 10.144,11.892 9.547,12.709 8.652,14.732 8.279,15.69 3.304,17.203 -1.098,20.035 -4.512,23.784 -4.537,23.675 -4.568,23.569 -4.594,23.46 -5.237,20.579 -5.355,17.692 -5.035,14.876 -2.653,14.432 -0.85,12.345 -0.85,9.834 -0.85,8.345 -2.155,6.187 -3.168,5.248 -2.067,2.872 -1.316,1.726 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2896"
+ transform="translate(125.4756,663.7393)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2898"
+ d="m 0,0 c -2.091,2.079 -3.537,6.226 -4.894,8.83 -0.254,-0.039 -0.514,-0.066 -0.78,-0.066 -2.836,0 -5.807,2.38 -5.135,5.134 0.372,1.524 1.424,2.521 3.137,3.353 -0.39,3.157 -0.496,7.695 0.237,10.977 0.21,0.939 0.655,1.379 0.95,2.273 -3.129,4.579 -5.151,10.589 -5.151,16.552 0,0.218 0.011,0.433 0.016,0.649 -5.288,-2.652 -9.253,-6.83 -9.253,-13.407 0,-14.548 8.379,-28.819 20.846,-34.413 C -0.018,-0.079 -0.01,-0.039 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2900"
+ transform="translate(156.1313,683.8511)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2902"
+ d="m 0,0 c -1.611,-4.582 -5.967,-7.873 -11.1,-7.873 -2.746,0 -5.265,0.947 -7.267,2.521 -4.127,-3.214 -7.871,-8.86 -9.774,-13.758 0.854,-0.919 1.449,-1.675 2.407,-2.49 2.887,-0.752 6.863,0 9.988,0 12.57,0 23.703,5.592 30.086,15.398 C 10.096,-3.263 5.09,-0.466 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2904">
+ <g
+ id="g2906"
+ clip-path="url(#clipPath2908)">
+ <g
+ id="g2912"
+ transform="translate(119.5596,695.7944)">
+ <path
+ style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2914"
+ d="m 0,0 c 0.969,2.146 2.184,4.132 3.605,5.931 -0.158,0.425 -0.25,0.884 -0.25,1.367 0,2.156 1.749,3.906 3.906,3.906 0.509,0 0.995,-0.101 1.44,-0.278 6.465,4.927 14.976,7.075 23.529,5.163 0.781,-0.176 1.547,-0.389 2.299,-0.623 -8.453,1.172 -17.187,-1.419 -23.668,-6.651 0.198,-0.466 0.306,-0.98 0.306,-1.517 0,-2.157 -1.749,-3.906 -3.906,-3.906 -0.49,0 -0.958,0.094 -1.391,0.259 -1.464,-1.966 -2.661,-4.138 -3.539,-6.48 3.078,-3.317 6.856,-5.94 11.075,-7.517 -0.01,0.201 -0.031,0.4 -0.031,0.605 0,6.503 5.271,11.775 11.775,11.775 5.828,0 10.654,-4.238 11.596,-9.798 2.908,1.85 5.492,4.226 7.634,6.968 -0.5,0.61 -0.81,1.379 -0.81,2.229 0,1.957 1.588,3.544 3.545,3.544 0.277,0 0.543,-0.04 0.802,-0.1 1.088,2.236 1.909,4.606 2.434,7.05 -10.17,7.529 -29.847,6.502 -29.847,6.502 0,0 -15.658,0.817 -26.258,-4.349 C -5.047,8.969 -3.008,4.11 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2916"
+ transform="translate(169.0396,699.8481)">
+ <path
+ style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2918"
+ d="m 0,0 c 0.719,-0.648 1.18,-1.577 1.18,-2.621 0,-1.957 -1.588,-3.545 -3.545,-3.545 -0.199,0 -0.391,0.027 -0.582,0.059 -2.205,-3.446 -5.127,-6.384 -8.559,-8.608 5.072,-0.793 9.846,-2.945 13.793,-6.201 2.875,5.151 4.518,11.084 4.518,17.402 0,4.809 -2.123,8.334 -5.328,10.92 C 1.309,4.83 0.805,2.347 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2920"
+ transform="translate(126.3252,666.6401)">
+ <path
+ style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2922"
+ d="M 0,0 C 1.753,4.841 4.799,9.185 8.878,12.484 8.281,13.302 7.789,14.195 7.416,15.153 2.44,16.666 -1.961,19.498 -5.375,23.247 -5.4,23.138 -5.432,23.032 -5.457,22.923 -6.101,20.042 -6.219,17.155 -5.898,14.339 -3.517,13.895 -1.713,11.808 -1.713,9.297 -1.713,7.808 -2.352,6.469 -3.365,5.53 -2.446,3.582 -1.316,1.726 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2924"
+ transform="translate(125.4619,663.7983)">
+ <path
+ style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2926"
+ d="m 0,0 c -2.091,2.079 -3.846,4.467 -5.202,7.07 -0.255,-0.039 -0.515,-0.065 -0.78,-0.065 -2.836,0 -5.135,2.299 -5.135,5.134 0,2.032 1.184,3.784 2.897,4.616 -0.389,3.156 -0.257,6.432 0.477,9.714 0.21,0.938 0.466,1.854 0.761,2.749 -3.129,4.578 -4.962,10.113 -4.962,16.076 0,0.218 0.01,0.433 0.015,0.648 -5.288,-2.651 -9.253,-6.83 -9.253,-13.406 0,-14.549 8.688,-27.06 21.155,-32.654 C -0.018,-0.079 -0.01,-0.039 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ <g
+ id="g2928"
+ transform="translate(155.8091,682.1509)">
+ <path
+ style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2930"
+ d="m 0,0 c -1.611,-4.582 -5.967,-7.873 -11.1,-7.873 -2.746,0 -5.265,0.947 -7.267,2.521 -4.127,-3.214 -7.242,-7.595 -9.144,-12.494 0.853,-0.919 1.765,-1.785 2.723,-2.599 2.888,-0.752 5.917,-1.155 9.042,-1.155 12.57,0 23.621,6.49 30.004,16.295 C 10.014,-2.365 5.09,-0.466 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2932">
+ <g
+ id="g2934"
+ clip-path="url(#clipPath2936)">
+ <g
+ id="g2940">
+ <g
+ id="g2942" />
+ <g
+ id="g2948">
+ <g
+ id="g2950"
+ clip-path="url(#clipPath2944)">
+ <g
+ id="g2952"
+ transform="translate(156.2222,685.187)">
+ <path
+ style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2954"
+ d="M 0,0 10.879,2.595 -0.041,3.152 8.846,9.944 -1.238,6.329 5.615,15.826 -3.85,9.535 l 3.309,11.117 -6.5,-9.163 -0.148,11.579 -4.277,-10.314 -3.566,10.437 0.193,-12.295 -6.163,11.021 3.335,-11.702 -9.997,7.27 7.831,-9.84 -12.411,4.564 9.795,-7.247 -12.56,-0.386 12.842,-3.314 -12.853,-2.779 12.687,-0.92 -10.699,-6.851 11.017,3.994 -7.644,-9.681 9.659,7.79 -3.478,-12.991 7.457,10.572 -1.045,-12.486 4.233,11.319 3.603,-11.897 0.876,11.933 5.348,-10.181 -3.16,11.645 9.793,-7.586 -6.322,9.672 10.744,-4.186 -8.215,8.073 L 10.85,-4.164 0,0 Z"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2956">
+ <g
+ id="g2958"
+ clip-path="url(#clipPath2960)">
+ <g
+ id="g2964">
+ <g
+ id="g2966" />
+ <g
+ id="g2972">
+ <g
+ style="opacity:0.35000604"
+ id="g2974"
+ clip-path="url(#clipPath2968)">
+ <g
+ id="g2976"
+ transform="translate(40.4033,664.3701)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2978"
+ d="m 0,0 c 33.74,33.739 60.687,44.155 85.143,48.91 3.236,0.629 3.848,7.7 3.848,7.7 0,0 0.453,-5.208 2.718,-5.887 2.264,-0.68 5.207,8.152 5.207,8.152 0,0 -2.717,-7.926 0,-8.379 2.718,-0.453 7.699,7.699 7.699,7.699 0,0 -2.037,-7.019 -0.678,-7.472 1.357,-0.453 8.15,10.189 8.15,10.189 0,0 -4.076,-7.019 -0.226,-7.699 3.851,-0.679 9.467,4.791 9.467,4.791 0,0 -4.416,-5.005 -2.448,-5.696 8.379,-2.945 15.159,7.945 15.159,7.945 0,0 -1.571,-4.775 -5.647,-9.983 8.83,-2.264 15.389,11.039 15.389,11.039 l -6.559,-13.303 c 3.397,-1.813 16.985,13.812 16.985,13.812 0,0 -7.02,-12.228 -11.096,-14.718 2.264,-1.812 10.416,5.434 10.416,5.434 0,0 -6.567,-8.151 -4.076,-8.604 3.623,-2.944 16.982,15.171 16.982,15.171 0,0 -5.207,-10.642 -12.906,-19.021 6.435,-3.219 22.418,17.436 22.418,17.436 0,0 -0.453,-6.567 -12.002,-16.983 8.605,1.132 19.701,17.436 19.701,17.436 0,0 -4.076,-12.228 -13.814,-20.832 8.449,0.879 21.964,21.738 21.964,21.738 0,0 -5.207,-14.492 -15.849,-22.871 11.775,-2.604 28.758,14.945 28.758,14.945 0,0 -6.68,-12.455 -15.399,-17.549 9.738,-3.736 23.098,11.662 23.098,11.662 0,0 -13.36,-20.607 -34.645,-19.701 -6.984,0.297 -28.109,21.188 -73.368,19.474 C 44.609,42.57 31.929,17.209 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2980"
+ transform="translate(41.7861,666.9326)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path2982"
+ d="m 0,0 c 33.74,33.739 60.686,44.154 85.142,48.91 3.237,0.629 3.849,7.699 3.849,7.699 0,0 0.452,-5.209 2.718,-5.887 2.264,-0.679 5.207,8.151 5.207,8.151 0,0 -2.717,-7.926 0,-8.378 2.718,-0.452 7.699,7.699 7.699,7.699 0,0 -2.037,-7.019 -0.68,-7.472 1.359,-0.453 8.152,10.19 8.152,10.19 0,0 -4.076,-7.02 -0.226,-7.699 3.849,-0.68 9.467,4.79 9.467,4.79 0,0 -4.416,-5.005 -2.448,-5.696 8.379,-2.944 15.157,7.945 15.157,7.945 0,0 -1.571,-4.775 -5.645,-9.983 8.83,-2.265 15.389,11.04 15.389,11.04 l -6.559,-13.305 c 3.397,-1.811 16.983,13.812 16.983,13.812 0,0 -7.018,-12.226 -11.094,-14.717 2.264,-1.812 10.416,5.434 10.416,5.434 0,0 -6.567,-8.152 -4.076,-8.604 3.623,-2.945 16.982,15.171 16.982,15.171 0,0 -5.209,-10.643 -12.906,-19.021 6.435,-3.22 22.418,17.436 22.418,17.436 0,0 -0.453,-6.568 -12.002,-16.984 8.605,1.133 19.701,17.437 19.701,17.437 0,0 -4.076,-12.228 -13.814,-20.833 8.449,0.879 21.964,21.738 21.964,21.738 0,0 -5.207,-14.492 -15.849,-22.87 11.775,-2.604 28.758,14.944 28.758,14.944 0,0 -6.68,-12.453 -15.399,-17.548 9.738,-3.736 23.098,11.662 23.098,11.662 0,0 -13.36,-20.607 -34.647,-19.701 -6.982,0.298 -28.107,21.189 -73.367,19.474 C 44.609,42.57 31.928,17.209 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g2984">
+ <g
+ id="g2986"
+ clip-path="url(#clipPath2988)">
+ <g
+ id="g2992">
+ <g
+ id="g2994" />
+ <g
+ id="g3000">
+ <g
+ style="opacity:0.35000604"
+ id="g3002"
+ clip-path="url(#clipPath2996)">
+ <g
+ id="g3004"
+ transform="translate(39.5195,660.6802)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path3006"
+ d="m 0,0 c 17.832,-8.945 34.137,1.358 54.686,-4.433 15.623,-4.404 34.645,-9.833 60.458,-6.096 25.814,3.735 47.893,14.944 58.424,34.985 3.283,8.943 16.642,-2.039 16.642,-2.039 0,0 -9.736,4.076 -9.509,2.151 0.226,-1.924 14.605,-8.604 14.605,-8.604 0,0 -13.021,4.076 -12.228,1.019 0.793,-3.057 16.302,-15.285 16.302,-15.285 0,0 -17.548,13.36 -19.019,11.549 -1.473,-1.812 7.472,-9.172 7.472,-9.172 0,0 -14.832,9.172 -20.041,6.467 -3.746,-1.943 15.399,-14.506 15.399,-14.506 0,0 -12.455,9.512 -15.399,7.021 -2.943,-2.492 14.04,-22.871 14.04,-22.871 0,0 -19.249,20.833 -21.172,19.814 -1.926,-1.019 5.32,-10.983 5.32,-10.983 0,0 -9.51,10.417 -12.113,8.605 -2.604,-1.812 13.586,-28.871 13.586,-28.871 0,0 -17.549,27.738 -24.795,23.098 11.379,-24.966 7.133,-28.533 7.133,-28.533 0,0 -1.452,25.47 -15.625,24.796 -7.133,-0.34 3.396,-19.021 3.396,-19.021 0,0 -9.691,17.062 -16.145,16.722 11.895,-22.511 7.655,-31.667 7.655,-31.667 0,0 1.967,19.226 -14.166,29.925 6.113,-5.433 -3.836,-29.925 -3.836,-29.925 0,0 8.752,36.091 -6.455,29.21 -2.403,-1.085 -0.17,-18.002 -0.17,-18.002 0,0 -3.057,19.362 -7.641,18.342 -2.673,-0.593 -16.984,-26.833 -16.984,-26.833 0,0 11.719,28.362 8.153,27.173 -2.598,-0.867 -7.473,-12.568 -7.473,-12.568 0,0 2.377,11.549 0,12.228 -2.377,0.68 -15.625,-12.228 -15.625,-12.228 0,0 9.851,11.549 8.152,13.927 -2.574,3.603 -5.591,3.772 -9.171,2.377 -5.209,-2.03 -12.227,-11.548 -12.227,-11.548 0,0 6.996,9.637 5.773,13.247 -1.963,5.8 -22.077,-11.209 -22.077,-11.209 0,0 11.888,11.209 9.171,13.587 -2.717,2.377 -17.471,1.642 -22.078,1.655 C 8.832,-6.454 4.124,-3.267 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ </g>
+ <g
+ id="g3008"
+ transform="translate(38.8408,662.7183)">
+ <path
+ style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="path3010"
+ d="m 0,0 c 17.832,-8.945 34.136,1.358 54.685,-4.434 15.623,-4.402 34.646,-9.832 60.46,-6.095 25.814,3.736 47.891,14.945 58.422,34.984 3.283,8.944 16.642,-2.037 16.642,-2.037 0,0 -9.736,4.075 -9.509,2.15 0.226,-1.924 14.605,-8.604 14.605,-8.604 0,0 -13.021,4.075 -12.228,1.018 0.793,-3.056 16.304,-15.284 16.304,-15.284 0,0 -17.55,13.361 -19.021,11.548 -1.471,-1.811 7.473,-9.17 7.473,-9.17 0,0 -14.833,9.17 -20.041,6.467 -3.747,-1.944 15.398,-14.506 15.398,-14.506 0,0 -12.455,9.511 -15.398,7.02 -2.944,-2.492 14.041,-22.871 14.041,-22.871 0,0 -19.25,20.833 -21.174,19.814 -1.924,-1.02 5.322,-10.982 5.322,-10.982 0,0 -9.512,10.416 -12.115,8.604 -2.604,-1.811 13.586,-28.871 13.586,-28.871 0,0 -17.549,27.739 -24.795,23.097 11.379,-24.965 7.133,-28.532 7.133,-28.532 0,0 -1.452,25.47 -15.625,24.795 -7.133,-0.34 3.396,-19.02 3.396,-19.02 0,0 -9.691,17.063 -16.144,16.723 11.896,-22.512 7.654,-31.668 7.654,-31.668 0,0 1.967,19.227 -14.166,29.926 6.113,-5.434 -3.836,-29.926 -3.836,-29.926 0,0 8.754,36.091 -6.453,29.21 -2.403,-1.086 -0.17,-18.002 -0.17,-18.002 0,0 -3.059,19.361 -7.642,18.342 -2.674,-0.593 -16.985,-26.833 -16.985,-26.833 0,0 11.719,28.362 8.153,27.172 -2.598,-0.865 -7.473,-12.566 -7.473,-12.566 0,0 2.378,11.548 0,12.227 -2.377,0.679 -15.624,-12.227 -15.624,-12.227 0,0 9.851,11.548 8.151,13.926 -2.574,3.603 -5.591,3.771 -9.17,2.376 -5.21,-2.029 -12.228,-11.547 -12.228,-11.547 0,0 6.996,9.638 5.774,13.247 -1.964,5.799 -22.077,-11.209 -22.077,-11.209 0,0 11.888,11.209 9.17,13.586 C 41.778,-5.774 27.024,-6.51 22.417,-6.496 8.831,-6.453 4.124,-3.267 0,0"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="PLACE YOUR PICTOGRAM HERE"
+ style="display:inline" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="BADGE"
+ style="display:none"
+ sodipodi:insensitive="true">
+ <g
+ style="display:inline"
+ transform="translate(-340.00001,-581)"
+ id="g4394"
+ clip-path="none">
+ <g
+ id="g855">
+ <g
+ inkscape:groupmode="maskhelper"
+ id="g870"
+ clip-path="url(#clipPath873)"
+ style="opacity:0.6;filter:url(#filter891)">
+ <path
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
+ d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path844"
+ style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ </g>
+ <g
+ id="g862">
+ <path
+ sodipodi:type="arc"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4398"
+ sodipodi:cx="252"
+ sodipodi:cy="552.36218"
+ sodipodi:rx="12"
+ sodipodi:ry="12"
+ d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
+ <path
+ transform="matrix(1.25,0,0,1.25,33,-100.45273)"
+ d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path4400"
+ style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ <path
+ sodipodi:type="star"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4459"
+ sodipodi:sides="5"
+ sodipodi:cx="666.19574"
+ sodipodi:cy="589.50385"
+ sodipodi:r1="7.2431178"
+ sodipodi:r2="4.3458705"
+ sodipodi:arg1="1.0471976"
+ sodipodi:arg2="1.6755161"
+ inkscape:flatsided="false"
+ inkscape:rounded="0.1"
+ inkscape:randomized="0"
+ d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 C 669.9821,591.68426 670.20862,595.55064 669.8173,595.77657 Z"
+ transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/charms/trusty/cassandra/lib/apache.key b/charms/trusty/cassandra/lib/apache.key
new file mode 100644
index 0000000..6dfb1fd
--- /dev/null
+++ b/charms/trusty/cassandra/lib/apache.key
@@ -0,0 +1,53 @@
+Apache Cassandra signing key (2014-11-14)
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFQJvgUBEAC0KcYCTj0hd15p4fiXBsbob0sKgsvN5Lm7N9jzJWlGshJ0peMi
+kH8YhDXw5Lh+mPEHksL7t1L8CIr1a+ntns/Opt65ZPO38ENVkOqEVAn9Z5sIoZsb
+7rX3USHJEnp1vAtG6YFTLpHOwgoIhY//D8qHb89MTYeqLHkep9h16vrybfglLLlJ
+qdph+lRC/hpJIdAUCmybym2arnLqBfYJTfU3LyRYQoRPIzrE38Y/oNvYN6mkwCPS
+fwNoPB7hrT0u6otNckxftR01To614+Jnl81RyTePGC/wahrHll7mbMEFw9nlMECt
+GgQYFRPmcojLvMxXnBWBQ4LjTSMvToFCPdnjzMeWkeN6qE1c2/S/qmxOXBSP5DCz
+UKuqAAZZoXTbMcEWYngUioFYUU1TSrK85Q8xqxAOUhYDSfv2brZp7h2lNY1RsQ9L
+6inMwNBTyLnH1b9WXH6XUNH51yUThJzRP2CUYw3P0lATrS62aTpehsnddQ8yWskq
++7nx6dN+khXqvAm+WyO2NojfeRFgHRIDlESTPkEekWzmZgI3R7ZzEBd6Nqx0k8mh
+ePW7ImChqyazjsZevBxJDOOjzFrvHEVMhQld2aQ3g4SYUZSCBbVZw7GqoqDaN3dH
+Vs1pFCpIOqIGg4qRID27AKvO0YdHDX8tIQIqCkvTrIJD8fJxyBDsXvqn0QARAQAB
+tCBUIEpha2UgTHVjaWFuaSA8amFrZUBhcGFjaGUub3JnPokCOAQTAQIAIgUCVAm+
+BQIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQdJ1u7ANTsSyMIA//Zqhl
+uNA7PSEz6+jhi2H3i8eOg2kNeu4Bz8jLOpu8LhrNzqZp5fbRJ9jchbkVWFeUW9wp
+tQfqPHIB7REMmwojS9Sf0jYmxR45qI26MTFQDb2vucMSVbrxYpK/afZywvYkIVHm
+RBPERyXezJoBfj23Gezft2OPRozs4OrsKfeyFZy+RfHDQkEX51eZ1co5kr1uLd1F
+UbtH42f8cCn9n5RaDAuDWmbHWfDQAHngxb+TAlLvBWEknRstc8lMIf/g6fv9EQBt
+hIaK4NwQhB2QYg4GMUzKR/eHX5nrVu6YtNyXN/rp8P3BoEwc4MRlOGnvJWrrsaqR
+uoxZX1PDHRTmypzygcnkdqciguy4Zz8fYkKjHZQt+CiuNqxkOwCg1dfCwTxCP1uM
+fd9Aw6zEb7cgBwcGqixp0M9zYJFAbPpFJ+/oZHiVQiEdRlB0BXhdWsBWKlBafoXC
+4/esVB7cNeG7kipF4zlFzrQv7xrQhMTV66RmiheiZs6FPNd8sQunrBsrcPqlMnb2
+yf0sX4yXiDxln4zvja0V1SaVKRfL91mLZOPzr3ccGfpog/3OxuzMiOlqVwCJz91g
+aNSg6OY2Wioop0t0LZ3K0TjamYLn4gEx+t7t/9UZZVutd/uWXS+zr5Kz32R2F0mJ
+eE7Gg2S8rMfplsYObSIK3jBXUIMgskNSaNmdX725Ag0EVAm+BQEQAK50pLVwBriP
+dC5chmDvBinbmB3rWN8lTrFb5IOjQxkrhA89d90i9Jjq49RG+7LsHX4E24SvBKZd
+2J5y04LjOeOweTieVSXNz/XqLTQcbat4btP0FCiE5rSCmfT9tGs185ilBEB3UOhQ
+P/0rkURNULHFbiP4WT36bkaSZ3lyP0lH0qckRXW3tez207+Ckpl58TCfKcyMLlky
+D3jO4+n3FfTNYSTLX40dW1MjfYJRQabNcsO9s5qxV1ir8ZqLBfvIG+FoPnVDP+7L
+lk4yEdugiKpOlGKsF9MSy6g0gKd3FO0jr/w0DCacvF3QH0rXqo0KYyb7GgQgtJku
+9ckfJxaW8Aobcj5xhcjgDwR0TeG0xTLlnKsBSebunujqRSWJSXCeEy9yPC2yvznd
+pal/kKt2vQaIAYpvYgvr7kKqvsAH5w5wf+p5wCrb+Pqhv3BTQonDNe/qo4DgsS0/
+VdQKiFvUxQiQUblDQVkpwX6UpPUBzIMsjJHWg4mGZSH4MjA0Qi7SqUCYJJCfvaEa
+0ad/rj3nYivzlXD4oWC9ZzYhIOVus8hvuAAjuK4JOM8MLvhph/dqwNr5kzdKVM4i
+3sStrx2KIsPO2ak702DJUYPjgsz2pe/D46BCpKiuJEAvzOKc0X+PQICwSHnKHkXV
+zbbaonvKdJVepMtE3oa5wqsH6tmP9b+fABEBAAGJAh8EGAECAAkFAlQJvgUCGwwA
+CgkQdJ1u7ANTsSyHNhAAmKsNTiBjadx6JQ94Dsw30EGHCUwpXNK3/+nXzFnKWDhm
+zd9SlFRFhqYiZHTLOK2cFUykcnPWiUepMEPz5JUK3qHvEu9LneZsejbF2yWFKZQ3
+Vu7bAFc8V92u9vwj1q3ZGHe+lEvDhUg4a97tfhFZrCyu0RZ0by+oPabfNbq3QkA1
+1lQDCuvBS8L0Y/oZM6Af+x2+vkERRgsPFmKUbkARnZxzIvlq8hwN0rp+AlyC+7/L
+LwA67Y+ekVimnjPyahCowPIROXNZgl65c4e06zpPgUSnbp5nI5SKp2NlyxNEpYBk
+cElEAqO+qH1oYaDO3QsHtSIq+qi+gWxNBuMNAJphys/82u7arHOgonsCqNvbS6Pt
+iaz3TUrQi+SFa1oQ67Gb1DQZ3EOraEdD/ooFWxEFS2aXo0bKs7nx0VgpxAFsuTN+
+niHnpmaOxdyUP8nQnc+GRMPxTB1/5906ww/PR6aLgW6+Jhc5pNGUI/gBSK7nLReq
+wEWi68zsTb9rh9I+ILdnbM/Loq1vCFLlGgc26U7aRj+N/os5ys5TPkOpuyMoz7Rq
+PkjrM2CQoOMxeLWjXJSwjWeLGPalw2/to9NFClznS6hYUETn2HB/Px0DOMiUzm3B
+AUeLlJzSeRLTKhcOugK7UcsQD2FHnMBJz50bxis9X7pjmnc/tWpjAGJfaWdjDIo=
+=yiQ4
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/charms/trusty/cassandra/lib/datastax.key b/charms/trusty/cassandra/lib/datastax.key
new file mode 100644
index 0000000..adb7cd8
--- /dev/null
+++ b/charms/trusty/cassandra/lib/datastax.key
@@ -0,0 +1,49 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQENBExkbXsBCACgUAbMWASAz/fmnMoWE4yJ/YHeuFHTK8zloJ/mApwizlQXTIVp
+U4UV8nbLJrbkFY92VTcC2/IBtvnHpZl8eVm/JSI7nojXc5Kmm4Ek/cY7uW2KKPr4
+cuka/5cNsOg2vsgTIMOZT6vWAbag2BGHtEJbriMLhT3v1tlu9caJfybu3QFWpahC
+wRYtG3B4tkypt21ssWwNnmp2bjFRGpLssc5HCCxUCBFLYoIkAGAFRZ6ymglsLDBn
+SCEzCkn9zQfmyqs0lZk4odBx6rzE350xgEnzFktT2uekFYqRqPQY8f7AhVfj2DJF
+gVM4wXbSoVrTnDiFsaJt/Ea4OJ263jRUHeIRABEBAAG0LVJpcHRhbm8gUGFja2Fn
+ZSBSZXBvc2l0b3J5IDxwYXVsQHJpcHRhbm8uY29tPokBPgQTAQIAKAIbAwYLCQgH
+AwIGFQgCCQoLBBYCAwECHgECF4AFAlW/zKMFCRLBYKQACgkQNQIA8rmZo3LebAgA
+gAwWkvBrPaD5Kf8H4uw9rXtHnHYxX5G6cOVJ3vuWCs1ov7m3JWq918q00hWfLtOs
+zb15kFcjcEJ7kiRFJmAXZhcX2I0DHTmTZSl9orKzoUlXQqAANJGdek8pzdTDUQfz
+V26k63d6eLqjXotrb0hFzg7B8VSolxRE44S5k1xhzUCedOqYYsWVv3xnRIP6UBPt
+WLvzrLa0o9x/hT4w81dOP4rzZMuq2RApnenoz9AZwJrmZ14QW2ncy4RbqK6pKdRJ
+y57vBv8F0LkGlLwBd/JYWwQ85lUTkNG5wCWdj0IEYTO3+fGyO1LHU6bVZCrNtkUE
+ahSZUiRdidiktIkbtNXImYkCHAQQAQgABgUCTGRt2QAKCRATbpzxe100LaUfD/9D
+q84HarIQMEoUiRBklg+afgTMaNNdvhU3V59KoMja2vMeE4JjE3SvNoKCHjPZj6Ti
+720KL6V5O/Uo1VjtSXzAPRJywcE9aS5HRjM2Dr1mp5GnmpvbiKBdl91G9aPc3D2Z
+LpG7vZr8E/vYLc5h1DMz2XDqi6gAqW2yxb2vnmHL4FiAdoXfpZimC9KZpUdTsGPO
+VbXEDEn3y/AiIC35Bq66Sp3W4gVNakV7Y5RUPPDDBIsTZEOhzd9nl5FXOnPtONp5
+dtp5NoWl6q3BjYe2P52TloCp+BJ62donfFTRSGfqyvtaRgmnHHEIWgypMghW6wSb
+O/BxFpdggHTItMfBg2a8tWDFjYmBoFd3iP9SfcmBb/7zB5YXC5b1/s3RNCtR76hf
++iXjm/zy22tb6qy5XJsnCoORjEoFaWNH6ckgACK7HQyJZ2Lo2MuCYYaQLs6gTd6a
+zMEQHT08cPF+I5It9mOzAtUOkCcVK8dIXRFETXFVdQqFMTmZmuK1Iv1CFBeUIHnM
+iyoYv1bzNsUg/hJpW8ximVmBg5Apza2K0p3XKHkw9MPBqnQ4PbBM1nqb/+o56p+o
+8mVZmjn4bdraB8c0Br15Mi19Zne7b65OZ5k+SVripUk5/XeJD9M9U6+DG+/uxemD
+Fzp9XjnnAe8T/u8JpqHYQ2mRONFM7ZMOAFeEe4yIEIkBPgQTAQIAKAUCTGRtewIb
+AwUJA8JnAAYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQNQIA8rmZo3K3HAf/
+V+6OSdt/Zwdsk+WsUwi75ndOIz60TN8Wg16WOMq5KOBuYIneG2+CEFJHTppNLc2j
+r/ugTjTPeS/DAo5MtnK+zzHxT7JmMKypb23t6MaahSlER4THbYvWUwsw5mm2LsTe
+PTlb5mkvQnXkt6pN2UzZVyIdNFXRv1YZLdTcf4aJ0pZySvCdYoE9RaoP4/JI9GfS
+NXH7oOxI8YaxRGK5i6w/LZyhxkfbkPX+pbbe1Ept+SZCcwWVc/S6veGZWQ1pNHR2
+RW6F3WE0Mle6xWtvW1NlMs4ATEqS13GS4RVlgE07KTe/oBRkd+4NwXAQoEzUvoRr
+j5Ad7LVKeygeUUyaWP+qN7kBDQRMZG17AQgAypZBEfm9pM8Tr4ktsHp1xThYHvzT
+OScLPZcCaF1Gjg8em0cQI4z4yN+yffsmUD4/dGcRxZgVms/jTexKQ8Z/Ps3e4vRG
+b4RCFaY0KhW4t+TTJJ9I5wvFzXZj7zNFxiQWpueiq/cDiBY+Liv3zMSOBaXzxR6L
+7igNPKi/0ELLyCIU/okUwqc0O/4r5PgFANkMyvvVNqzxjC5s8MXbGivJXiML67/Y
+0M/siNqDSia/TGItpXjvi7v1zulbiIV0iSBkO3vsxNE0xXGBXY/UztAShN3FTbx9
+CZDupi35wgqK7McJ3WSjEDzwkElmwkmh7JdLziyH09kS1wRqiLcB+wSTywARAQAB
+iQElBBgBAgAPAhsMBQJVv8zOBQkSwWDOAAoJEDUCAPK5maNyLl4H/3n/+xZsuKia
+fHtBUMh44YRabEX1Bd10LAfxGlOZtKV/Dr1RaKetci6RRa5sJj0wKra6FhIryuqS
+jFTalPF3o8WjVEA5AjJ3ddSgAwX5gGJ3u+C0XMI0E6h/vAXh6meFxHtGinYr1Gcp
+P1/S3/Jy+0cmTt3FvqBtXtU3VIyb/4vUNZ+dY+jcw/gs/yS+s+jtR8hWUDbSrbU9
+pja+p1icNwU5pMbEfx1HYB7JCKuE0iJNbAFagRtPCOKq4vUTPDUQUB5MjWV+89+f
+cizh+doQR9z8e+/02drCCMWiUf4iiFs2dNHwaIPDOJ8Xn9xcxiUaKk32sjT3sict
+XO5tB2KhE3A=
+=YO7C
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/charms/trusty/cassandra/lib/juju-deployer-wrapper.py b/charms/trusty/cassandra/lib/juju-deployer-wrapper.py
new file mode 100755
index 0000000..bf792f2
--- /dev/null
+++ b/charms/trusty/cassandra/lib/juju-deployer-wrapper.py
@@ -0,0 +1,15 @@
+#!/usr/bin/python
+
+# Thin wrapper around the juju-deployer command line tool.
+# It strips the noisy -W flag and swallows juju-deployer's console
+# output, re-emitting it on stderr only when the tool actually fails.
+
+import subprocess
+import sys
+
+# Strip the -W option, as its noise messes with test output.
+args = list(sys.argv[1:])
+if '-W' in args:
+    args.remove('-W')
+cmd = ['juju-deployer'] + args
+try:
+    # Capture combined stdout+stderr; it is only shown on failure.
+    subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+except subprocess.CalledProcessError as x:
+    # Surface the failed command's output and propagate its exit code.
+    sys.stderr.write(x.output)
+    sys.exit(x.returncode)
diff --git a/charms/trusty/cassandra/lib/testcharms/empty/hooks/install b/charms/trusty/cassandra/lib/testcharms/empty/hooks/install
new file mode 100755
index 0000000..ce28f62
--- /dev/null
+++ b/charms/trusty/cassandra/lib/testcharms/empty/hooks/install
@@ -0,0 +1,6 @@
+#!/bin/sh -ex
+# apt-get install \
+# python3-pip python3-dev python3-six python3-yaml \
+# build-essential libev4 libev-dev
+# pip3 install blist
+# pip3 install cassandra-driver
diff --git a/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml b/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml
new file mode 100644
index 0000000..bc3e0f0
--- /dev/null
+++ b/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml
@@ -0,0 +1,11 @@
+name: empty
+maintainer: Somchai Turdsak <somchai@example.com>
+summary: A charm that does nothing except define some relations.
+description: |
+ This is a client charm for testing the Cassandra client relations.
+tags: ["databases"]
+requires:
+ database:
+ interface: cassandra
+ database-admin:
+ interface: cassandra-admin
diff --git a/charms/trusty/cassandra/metadata.yaml b/charms/trusty/cassandra/metadata.yaml
new file mode 100644
index 0000000..7ec537a
--- /dev/null
+++ b/charms/trusty/cassandra/metadata.yaml
@@ -0,0 +1,38 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+name: cassandra
+maintainer: Stuart Bishop <stuart.bishop@canonical.com>
+summary: distributed storage system for structured data
+description: |
+ Cassandra is a distributed (peer-to-peer) system for the management and
+ storage of structured data.
+tags: ["databases"]
+provides:
+ database:
+ interface: cassandra
+ database-admin:
+ interface: cassandra-admin
+ nrpe-external-master:
+ interface: nrpe-external-master
+ scope: container
+ data:
+ interface: block-storage
+ scope: container
+ optional: true
+peers:
+ cluster:
+ interface: cassandra-cluster
diff --git a/charms/trusty/cassandra/scripts/volume-common.sh b/charms/trusty/cassandra/scripts/volume-common.sh
new file mode 100755
index 0000000..3af8ff1
--- /dev/null
+++ b/charms/trusty/cassandra/scripts/volume-common.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+# Author: JuanJo Ciarlante <jjo@canonical.com>
+# Copyright: Canonical Ltd. 2012
+# License: GPLv2
+#
+# juju storage common shell library
+#
+
+#------------------------------
+# Returns a mount point from passed vol-id, e.g. /srv/juju/vol-000012345
+#
+# @param $1 volume id
+# @echoes mntpoint-path eg /srv/juju/vol-000012345
+#------------------------------
+_mntpoint_from_volid() {
+    local volid=${1?missing volid}
+    # An empty volid echoes "" rather than the bare /srv/juju/ prefix.
+    [[ ${volid} != "" ]] && echo /srv/juju/${volid} || echo ""
+}
+
+
+#------------------------------
+# Assert that passed mount points hold different filesystems
+#
+# @param $1 mntpoint1
+# @param $2 mntpoint2
+# @return 0 different FS
+#         1 same FS
+#------------------------------
+_assert_diff_fs() {
+    local mnt1="${1:?missing mntpoint1}"
+    local mnt2="${2:?missing mntpoint2}"
+    local fsid1 fsid2
+    # Compare filesystem ids; stat errors are discarded.
+    # NOTE(review): if stat fails on both paths, both ids are empty and
+    # this reports "same FS" - confirm that failing closed is intended.
+    fsid1=$(stat --file-system -c '%i' "${mnt1}" 2>/dev/null)
+    fsid2=$(stat --file-system -c '%i' "${mnt2}" 2>/dev/null)
+    [[ ${fsid1} != ${fsid2} ]]
+    return $?
+}
+
+#------------------------------
+# Initialize volume (sfdisk, mkfs.ext4) IFF NOT already, mount it at
+# /srv/juju/<volume-id>
+#
+# @param $1 volume-id, can be any arbitrary string, better if
+#           equal to EC2/OS vol-id name (just for consistency)
+# @return 0 success
+#         1 nil volid/etc
+#         2 error while handling the device (non-block device, sfdisk error, etc)
+#------------------------------
+volume_init_and_mount() {
+    ## Find 1st unused device (reverse sort /dev/vdX)
+    local volid=${1:?missing volid}
+    local dev_regexp
+    local dev found_dev=
+    local label="${volid}"
+    local func=${FUNCNAME[0]}
+    dev_regexp=$(config-get volume-dev-regexp) || return 1
+    mntpoint=$(_mntpoint_from_volid ${volid})
+
+    [[ -z ${mntpoint} ]] && return 1
+    # Already mounted? Just assert it is not the root filesystem.
+    if mount | egrep -qw "${mntpoint}";then
+        _assert_diff_fs "/" "${mntpoint}" || {
+            juju-log "ERROR: returning from ${func} with '${mntpoint}' still at '/' filesystem"
+            return 1
+        }
+        juju-log "NOTICE: mntpoint=${mntpoint} already mounted, skipping volume_init_and_mount"
+        return 0
+    fi
+
+    # Sanitize
+    case "${dev_regexp?}" in
+    # Careful: this is glob matching against an regexp -
+    # quite narrowed
+    /dev/*|/dev/disk/by-*)
+        ;; ## Ok
+    *)
+        juju-log "ERROR: invalid 'volume-dev-regexp' specified"
+        return 1
+        ;;
+    esac
+
+    # Assume udev will create only existing devices
+    for dev in $(ls -rd1 /dev/* | egrep "${dev_regexp}" | egrep -v "[1-9]$" 2>/dev/null);do
+        ## Check it's not already mounted
+        mount | egrep -q "${dev}[1-9]?" || { found_dev=${dev}; break;}
+    done
+    [[ -n "${found_dev}" ]] || {
+        juju-log "ERROR: ${func}: could not find an unused device for regexp: ${dev_regexp}"
+        return 1
+    }
+    partition1_dev=${found_dev}1
+
+    juju-log "INFO: ${func}: found_dev=${found_dev}"
+    [[ -b ${found_dev?} ]] || {
+        juju-log "ERROR: ${func}: ${found_dev} is not a blockdevice"
+        return 2
+    }
+
+    # Run next set of "dangerous" commands as 'set -e', in a subshell
+    (
+    set -e
+    # Re-read partition - will fail if already in use
+    blockdev --rereadpt ${found_dev}
+
+    # IFF not present, create partition with full disk
+    if [[ -b ${partition1_dev?} ]];then
+        juju-log "INFO: ${func}: ${partition1_dev} already present - skipping sfdisk."
+    else
+        juju-log "NOTICE: ${func}: ${partition1_dev} not present at ${found_dev}, running: sfdisk ${found_dev} ..."
+        # Format partition1_dev as max sized
+        echo ",+," | sfdisk ${found_dev}
+    fi
+
+    # Create an ext4 filesystem if NOT already present
+    # use e.g. LABEL=vol-000012345
+    if file -s ${partition1_dev} | egrep -q ext4 ; then
+        juju-log "INFO: ${func}: ${partition1_dev} already formatted as ext4 - skipping mkfs.ext4."
+        ## Check e2label - log if it has changed (e.g. already used / initialized with a diff label)
+        local curr_label=$(e2label "${partition1_dev}")
+        if [[ ${curr_label} != ${label} ]]; then
+            juju-log "WARNING: ${func}: ${partition1_dev} had label=${curr_label}, overwriting with label=${label}"
+            e2label ${partition1_dev} "${label}"
+        fi
+    else
+        juju-log "NOTICE: ${func}: running: mkfs.ext4 -L ${label} ${partition1_dev}"
+        mkfs.ext4 -L "${label}" ${partition1_dev}
+    fi
+
+    # Mount it at e.g. /srv/juju/vol-000012345
+    [[ -d "${mntpoint}" ]] || mkdir -p "${mntpoint}"
+    mount | fgrep -wq "${partition1_dev}" || {
+        local files_below_mntpoint="$(cd ${mntpoint}; ls -1A |wc -l )"
+        # Refuse to shadow existing files with the new mount.
+        if [[ ${files_below_mntpoint} -ne 0 ]]; then
+            juju-log "ERROR: *not* doing 'mount "${partition1_dev}" "${mntpoint}"' because there are already ${files_below_mntpoint} files/dirs beneath '${mntpoint}'"
+            exit 1
+        fi
+        ## should always fsck before mounting (e.g. fsck after max time (-i) / max mounts (-c) )
+        fsck "${partition1_dev}"
+        mount "${partition1_dev}" "${mntpoint}"
+        juju-log "INFO: ${func}: mounted as: '$(mount | fgrep -w ${partition1_dev})'"
+    }
+
+    # Add it to fstab if not already there
+    fgrep -wq "LABEL=${label}" /etc/fstab || {
+        echo "LABEL=${label} ${mntpoint} ext4 defaults,nobootwait,comment=${volid}" | tee -a /etc/fstab
+        juju-log "INFO: ${func}: LABEL=${label} added to /etc/fstab"
+    }
+    )
+    # Final assertion: mounted filesystem id is different from '/' (effectively mounted)
+    _assert_diff_fs "/" "${mntpoint}" || {
+        juju-log "ERROR: returning from ${func} with '${mntpoint}' still at '/' filesystem (couldn't mount new volume)"
+        ## try to rmdir mntpoint directory - should not be 'mistakenly' used
+        rmdir ${mntpoint}
+        return 1
+    }
+    return $?
+}
+
+#------------------------------
+# Get volume-id from juju config "volume-map" dictionary as
+# volume-map[JUJU_UNIT_NAME]
+# @return 0 if volume-map value found ( does echo volid or ""), else:
+#         1 if not found or None
+#
+#------------------------------
+volume_get_volid_from_volume_map() {
+    # Parse the YAML "volume-map" config with an inline python snippet and
+    # look up this unit's entry.
+    # NOTE(review): the snippet uses the python2 print statement - confirm
+    # /usr/bin/python is python2 on the target series.
+    local volid=$(config-get "volume-map"|python -c$'import sys;import os;from yaml import load;from itertools import chain; volume_map = load(sys.stdin)\nif volume_map: print volume_map.get(os.environ["JUJU_UNIT_NAME"])')
+    # python prints the literal string "None" when the unit has no mapping.
+    [[ $volid == None ]] && return 1
+    echo "$volid"
+}
+
+# Returns true if permanent storage (considers --ephemeral)
+# @returns 0 if volid set and not --ephemeral, else:
+#          1
+volume_is_permanent() {
+    local volid=${1:?missing volid}
+    # Any non-empty volid other than the literal "--ephemeral" is durable.
+    [[ -n ${volid} && ${volid} != --ephemeral ]] && return 0 || return 1
+}
+# Resolve the mount point for a volume id.
+# @param $1 volid
+# @return 0, echoing /srv/juju/<volid>, for permanent storage
+#         1 for ephemeral storage (no mount point)
+volume_mount_point_from_volid(){
+    local volid=${1:?missing volid}
+    if volume_is_permanent "${volid}";then
+        echo "/srv/juju/${volid}"
+        return 0
+    else
+        return 1
+    fi
+}
+# Do we have a valid storage state?
+# Cross-checks the volume-ephemeral-storage flag against the volume-map
+# entry for this unit: ephemeral units must have no volid, durable units
+# must have one.
+# @returns 0 does echo $volid (can be "--ephemeral")
+#          1 config state is invalid - we should not serve
+volume_get_volume_id() {
+    local ephemeral_storage
+    local volid
+    ephemeral_storage=$(config-get volume-ephemeral-storage) || return 1
+    volid=$(volume_get_volid_from_volume_map) || return 1
+    if [[ $ephemeral_storage == True ]];then
+        # Ephemeral -> should not have a valid volid
+        if [[ $volid != "" ]];then
+            juju-log "ERROR: volume-ephemeral-storage is True, but $JUJU_UNIT_NAME maps to volid=${volid}"
+            return 1
+        fi
+    else
+        # Durable (not ephemeral) -> must have a valid volid for this unit
+        if [[ $volid == "" ]];then
+            juju-log "ERROR: volume-ephemeral-storage is False, but no volid found for: $JUJU_UNIT_NAME"
+            return 1
+        fi
+    fi
+    echo "$volid"
+    return 0
+}
+
+case "$1" in
+ ## allow non SHELL scripts to call helper functions
+ call)
+ : ${JUJU_UNIT_NAME?} ## Must be called in juju environment
+ shift;
+ function="${1:?usage: ${0##*/} call function arg1 arg2 ...}"
+ shift;
+ ${function} "$@" && exit 0 || exit 1
+esac
diff --git a/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl b/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl
new file mode 100644
index 0000000..3a9b9d4
--- /dev/null
+++ b/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl
@@ -0,0 +1,6 @@
+# Cassandra maintenance
+# Staggered weekly repairs
+# m h dom mon dow user command
+{{minute}} {{hour}} * * {{dow}} cassandra run-one-until-success nodetool repair -pr >> /var/log/cassandra/maintenance.log 2>&1
+
+# EOF
diff --git a/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl b/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl
new file mode 100644
index 0000000..53318fa
--- /dev/null
+++ b/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl
@@ -0,0 +1,6 @@
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+# ${NAGIOS_CONTEXT} ${SERVICE_DESCRIPTION}
+command[${NRPE_CMD_NAME}]=${NRPE_CMD}
+
diff --git a/charms/trusty/cassandra/templates/nrpe_service_file.tmpl b/charms/trusty/cassandra/templates/nrpe_service_file.tmpl
new file mode 100644
index 0000000..ab0b76f
--- /dev/null
+++ b/charms/trusty/cassandra/templates/nrpe_service_file.tmpl
@@ -0,0 +1,10 @@
+#---------------------------------------------------
+# This file is Juju managed
+#---------------------------------------------------
+define service {
+ use active-service
+ host_name ${NAGIOS_HOSTNAME}
+ service_description ${NAGIOS_HOSTNAME} ${SERVICE_DESCRIPTION}
+ check_command check_nrpe!${NRPE_CMD_NAME}
+ servicegroups ${NAGIOS_SERVICEGROUP},
+}
diff --git a/charms/trusty/cassandra/testing/__init__.py b/charms/trusty/cassandra/testing/__init__.py
new file mode 100644
index 0000000..b1b7fcd
--- /dev/null
+++ b/charms/trusty/cassandra/testing/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/testing/amuletfixture.py b/charms/trusty/cassandra/testing/amuletfixture.py
new file mode 100644
index 0000000..988267f
--- /dev/null
+++ b/charms/trusty/cassandra/testing/amuletfixture.py
@@ -0,0 +1,234 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from functools import wraps
+import json
+import os
+import shutil
+import subprocess
+import tempfile
+import time
+
+import amulet
+import yaml
+
+
+class AmuletFixture(amulet.Deployment):
+ def __init__(self, series, verbose=False):
+ if verbose:
+ super(AmuletFixture, self).__init__(series=series)
+ else:
+ # We use a wrapper around juju-deployer so we can fix how it is
+ # invoked. In particular, turn off all the noise so we can
+ # actually read our test output.
+ juju_deployer = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), os.pardir, 'lib',
+ 'juju-deployer-wrapper.py'))
+ super(AmuletFixture, self).__init__(series=series,
+ juju_deployer=juju_deployer)
+ assert self.series == series
+
+ def setUp(self):
+ self._temp_dirs = []
+
+ self.reset_environment(force=True)
+
+ # Repackage our charm to a temporary directory, allowing us
+ # to strip our virtualenv symlinks that would otherwise cause
+ # juju to abort. We also strip the .bzr directory, working
+ # around Bug #1394078.
+ self.repackage_charm()
+
+ # Fix amulet.Deployment so it doesn't depend on environment
+ # variables or the current working directory, but rather the
+ # environment we have introspected.
+ with open(os.path.join(self.charm_dir, 'metadata.yaml'), 'r') as s:
+ self.charm_name = yaml.safe_load(s)['name']
+ self.charm_cache.test_charm = None
+ self.charm_cache.fetch(self.charm_name, self.charm_dir,
+ series=self.series)
+
+ # Explicitly reset $JUJU_REPOSITORY to ensure amulet and
+ # juju-deployer does not mess with the real one, per Bug #1393792
+ self.org_repo = os.environ.get('JUJU_REPOSITORY', None)
+ temp_repo = tempfile.mkdtemp(suffix='.repo')
+ self._temp_dirs.append(temp_repo)
+ os.environ['JUJU_REPOSITORY'] = temp_repo
+ os.makedirs(os.path.join(temp_repo, self.series), mode=0o700)
+
+ def tearDown(self, reset_environment=True):
+ if reset_environment:
+ self.reset_environment()
+ if self.org_repo is None:
+ del os.environ['JUJU_REPOSITORY']
+ else:
+ os.environ['JUJU_REPOSITORY'] = self.org_repo
+
+ def deploy(self, timeout=None):
+ '''Deploying or updating the configured system.
+
+ Invokes amulet.Deployer.setup with a nicer name and standard
+ timeout handling.
+ '''
+ if timeout is None:
+ timeout = int(os.environ.get('AMULET_TIMEOUT', 900))
+
+ # juju-deployer is buried under here, and has race conditions.
+ # Sleep a bit before invoking it, so its cached view of the
+ # environment matches reality.
+ time.sleep(15)
+
+ # If setUp fails, tearDown is never called leaving the
+ # environment setup. This is useful for debugging.
+ self.setup(timeout=timeout)
+ self.wait(timeout=timeout)
+
+ def __del__(self):
+ for temp_dir in self._temp_dirs:
+ if os.path.exists(temp_dir):
+ shutil.rmtree(temp_dir, ignore_errors=True)
+
+ def get_status(self):
+ try:
+ raw = subprocess.check_output(['juju', 'status', '--format=json'],
+ universal_newlines=True)
+ except subprocess.CalledProcessError as x:
+ print(x.output)
+ raise
+ if raw:
+ return json.loads(raw)
+ return None
+
+ def wait(self, timeout=None):
+ '''Wait until the environment has reached a stable state.'''
+ if timeout is None:
+ timeout = int(os.environ.get('AMULET_TIMEOUT', 900))
+ cmd = ['timeout', str(timeout), 'juju', 'wait', '-q']
+ try:
+ subprocess.check_output(cmd, universal_newlines=True)
+ except subprocess.CalledProcessError as x:
+ print(x.output)
+ raise
+
+ def reset_environment(self, force=False):
+ if force:
+ status = self.get_status()
+ machines = [m for m in status.get('machines', {}).keys()
+ if m != '0']
+ if machines:
+ subprocess.call(['juju', 'destroy-machine',
+ '--force'] + machines,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+ fails = dict()
+ while True:
+ status = self.get_status()
+ service_items = status.get('services', {}).items()
+ if not service_items:
+ break
+ for service_name, service in service_items:
+ if service.get('life', '') not in ('dying', 'dead'):
+ subprocess.call(['juju', 'destroy-service', service_name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ for unit_name, unit in service.get('units', {}).items():
+ if unit.get('agent-state', None) == 'error':
+ if force:
+ # If any units have failed hooks, unstick them.
+ # This should no longer happen now we are
+ # using the 'destroy-machine --force' command
+ # earlier.
+ try:
+ subprocess.check_output(
+ ['juju', 'resolved', unit_name],
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ # A previous 'resolved' call make cause a
+ # subsequent one to fail if it is still
+ # being processed. However, we need to keep
+ # retrying because after a successful
+ # resolution a subsequent hook may cause an
+ # error state.
+ pass
+ else:
+ fails[unit_name] = unit
+ time.sleep(1)
+
+ harvest_machines = []
+ for machine, state in status.get('machines', {}).items():
+ if machine != "0" and state.get('life') not in ('dying', 'dead'):
+ harvest_machines.append(machine)
+
+ if harvest_machines:
+ cmd = ['juju', 'remove-machine', '--force'] + harvest_machines
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+
+ if fails:
+ raise Exception("Teardown failed", fails)
+
+ def repackage_charm(self):
+ """Mirror the charm into a staging area.
+
+ We do this to work around issues with Amulet, juju-deployer
+ and juju. In particular:
+ - symlinks in the Python virtual env pointing outside of the
+ charm directory.
+ - odd bzr interactions, such as tests being run on the committed
+ version of the charm, rather than the working tree.
+
+ Returns the test charm directory.
+ """
+ # Find the charm_dir we are testing
+ src_charm_dir = os.path.dirname(__file__)
+ while True:
+ if os.path.exists(os.path.join(src_charm_dir,
+ 'metadata.yaml')):
+ break
+ assert src_charm_dir != os.sep, 'metadata.yaml not found'
+ src_charm_dir = os.path.abspath(os.path.join(src_charm_dir,
+ os.pardir))
+
+ with open(os.path.join(src_charm_dir, 'metadata.yaml'), 'r') as s:
+ self.charm_name = yaml.safe_load(s)['name']
+
+ repack_root = tempfile.mkdtemp(suffix='.charm')
+ self._temp_dirs.append(repack_root)
+ # juju-deployer now requires the series in the path when
+ # deploying from an absolute path.
+ repack_root = os.path.join(repack_root, self.series)
+ os.makedirs(repack_root, mode=0o700)
+
+ self.charm_dir = os.path.join(repack_root, self.charm_name)
+
+ # Ignore .bzr to work around weird bzr interactions with
+ # juju-deployer, per Bug #1394078, and ignore .venv
+ # due to a) it containing symlinks juju will reject and b) to avoid
+ # infinite recursion.
+ shutil.copytree(src_charm_dir, self.charm_dir, symlinks=True,
+ ignore=shutil.ignore_patterns('.venv?', '.bzr'))
+
+
+# Bug #1417097 means we need to monkey patch Amulet for now.
+real_juju = amulet.helpers.juju
+
+
+@wraps(real_juju)
+def patched_juju(args, env=None):
+    # Coerce every argument to str before delegating, so callers may
+    # pass ints (e.g. unit numbers) without tripping up amulet.
+    args = [str(a) for a in args]
+    return real_juju(args, env)
+
+# Install the patched entry point everywhere amulet keeps a reference.
+amulet.helpers.juju = patched_juju
+amulet.deployer.juju = patched_juju
diff --git a/charms/trusty/cassandra/testing/mocks.py b/charms/trusty/cassandra/testing/mocks.py
new file mode 100644
index 0000000..7d03f23
--- /dev/null
+++ b/charms/trusty/cassandra/testing/mocks.py
@@ -0,0 +1,182 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+'''
+charm-helpers mocks.
+'''
+import os.path
+import shutil
+import tempfile
+from unittest.mock import patch
+
+import yaml
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv
+
+
+CHARM_DIR = os.path.abspath(os.path.join(
+ os.path.dirname(__file__), os.pardir))
+
+
+def mock_charmhelpers(test_case):
+ hookenv.cache.clear() # Clear the hookenv memorisation.
+
+ mocks = []
+
+ # Mock environment
+ charm_dir = tempfile.TemporaryDirectory()
+ test_case.addCleanup(charm_dir.cleanup)
+ mock_env = patch.dict(os.environ, dict(CHARM_DIR=charm_dir.name))
+ mock_env.start()
+ test_case.addCleanup(mock_env.stop)
+ shutil.copyfile(os.path.join(CHARM_DIR, 'metadata.yaml'),
+ os.path.join(charm_dir.name, 'metadata.yaml'))
+
+ # Mock config.
+ # Set items:
+ # hookenv.config()['foo'] = 'bar'
+ # Reset 'previous' state:
+ # hookenv.config().save();
+ # hookenv.config().load_previous()
+ config = hookenv.Config()
+ tmp = tempfile.NamedTemporaryFile(suffix='.config')
+ config.CONFIG_FILE_NAME = tmp.name
+ test_case.addCleanup(tmp.close)
+ with open(os.path.join(CHARM_DIR, 'config.yaml'), 'rb') as f:
+ defaults = yaml.safe_load(f)['options']
+ for k, v in defaults.items():
+ opt_type = v.get('type', 'string')
+ opt_val = v.get('default', None)
+ if opt_val is None:
+ config[k] = None
+ elif opt_type == 'string':
+ config[k] = str(opt_val)
+ elif opt_type == 'int':
+ config[k] = int(opt_val)
+ elif opt_type == 'boolean':
+ config[k] = bool(opt_val)
+
+ def mock_config(scope=None):
+ if scope is None:
+ return config
+ return config.get(scope, None)
+ mocks.append(patch('charmhelpers.core.hookenv.config',
+ side_effect=mock_config, autospec=True))
+
+ # Magic mocks.
+ methods = [
+ 'charmhelpers.core.hookenv.log',
+ 'charmhelpers.core.hookenv.hook_name',
+ 'charmhelpers.core.hookenv.related_units',
+ 'charmhelpers.core.hookenv.relation_get',
+ 'charmhelpers.core.hookenv.relation_set',
+ 'charmhelpers.core.hookenv.relation_ids',
+ 'charmhelpers.core.hookenv.relation_type',
+ 'charmhelpers.core.hookenv.service_name',
+ 'charmhelpers.core.hookenv.local_unit',
+ 'charmhelpers.core.hookenv.unit_private_ip',
+ 'charmhelpers.core.hookenv.unit_public_ip',
+ 'charmhelpers.core.host.log',
+ 'charmhelpers.fetch.filter_installed_packages',
+ 'os.chown', 'os.fchown',
+ ]
+ for m in methods:
+ mocks.append(patch(m, autospec=True))
+
+ for mock in mocks:
+ mock.start()
+ test_case.addCleanup(mock.stop)
+
+ hookenv.local_unit.return_value = 'service/1'
+
+ def mock_unit_private_ip():
+ return '10.20.0.{}'.format(hookenv.local_unit().split('/')[-1])
+ hookenv.unit_private_ip.side_effect = mock_unit_private_ip
+
+ def mock_unit_public_ip():
+ return '10.30.0.{}'.format(hookenv.local_unit().split('/')[-1])
+ hookenv.unit_public_ip.side_effect = mock_unit_public_ip
+
+ def mock_service_name():
+ return hookenv.local_unit().split('/')[0]
+ hookenv.service_name.side_effect = mock_service_name
+
+ hookenv.relation_ids.side_effect = (
+ lambda x: ['{}:1'.format(x)] if x else [])
+ hookenv.related_units.return_value = ('service/2', 'service/3')
+
+ relinfos = dict()
+
+ def mock_relation_set(relation_id=None, relation_settings=None, **kwargs):
+ if relation_id is None:
+ relation_id = hookenv.relation_id()
+ unit = hookenv.local_unit()
+ relinfo = mock_relation_get(unit=unit, rid=relation_id)
+ if relation_settings is not None:
+ relinfo.update(relation_settings)
+ relinfo.update(kwargs)
+ return None
+ hookenv.relation_set.side_effect = mock_relation_set
+
+ def mock_relation_get(attribute=None, unit=None, rid=None):
+ if rid is None:
+ rid = hookenv.relation_id()
+ if unit is None:
+ unit = hookenv.remove_unit()
+ service, unit_num = unit.split('/')
+ unit_num = int(unit_num)
+ relinfos.setdefault(rid, {})
+ relinfos[rid].setdefault(
+ unit, {'private-address': '10.20.0.{}'.format(unit_num)})
+ relinfo = relinfos[rid][unit]
+ if attribute is None or attribute == '-':
+ return relinfo
+ return relinfo.get(attribute)
+ hookenv.relation_get.side_effect = mock_relation_get
+
+ def mock_chown(target, uid, gid):
+ assert uid == 0
+ assert gid == 0
+ assert os.path.exists(target)
+ os.chown.side_effect = mock_chown
+
+ def mock_fchown(fd, uid, gid):
+ assert uid == 0
+ assert gid == 0
+ os.fchown.side_effect = mock_fchown
+
+ fetch.filter_installed_packages.side_effect = lambda pkgs: list(pkgs)
+
+ def mock_relation_for_unit(unit=None, rid=None):
+ if unit is None:
+ unit = hookenv.remote_unit()
+ service, unit_num = unit.split('/')
+ unit_num = int(unit_num)
+ return {'private-address': '10.20.0.{}'.format(unit_num)}
+ hookenv.relation_for_unit.side_effect = mock_relation_for_unit
+
+ def mock_chown(target, uid, gid):
+ assert uid == 0
+ assert gid == 0
+ assert os.path.exists(target)
+ os.chown.side_effect = mock_chown
+
+ def mock_fchown(fd, uid, gid):
+ assert uid == 0
+ assert gid == 0
+ os.fchown.side_effect = mock_fchown
+
+ fetch.filter_installed_packages.side_effect = lambda pkgs: list(pkgs)
diff --git a/charms/trusty/cassandra/tests/__init__.py b/charms/trusty/cassandra/tests/__init__.py
new file mode 100644
index 0000000..b1b7fcd
--- /dev/null
+++ b/charms/trusty/cassandra/tests/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/tests/base.py b/charms/trusty/cassandra/tests/base.py
new file mode 100755
index 0000000..d308985
--- /dev/null
+++ b/charms/trusty/cassandra/tests/base.py
@@ -0,0 +1,43 @@
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import functools
+from itertools import count
+import unittest
+from unittest.mock import patch
+
+from testing.mocks import mock_charmhelpers
+
+patch = functools.partial(patch, autospec=True) # autospec by default.
+
+
class TestCaseBase(unittest.TestCase):
    """Common fixture: charmhelpers fully mocked plus standard patches."""

    def setUp(self):
        super(TestCaseBase, self).setUp()

        # Replace the charmhelpers API with mocks for the duration of
        # each test.
        mock_charmhelpers(self)

        # Standard environment patches: not in an LXC container, no
        # diagnostic emission, and time.time() becomes a deterministic
        # counter starting at 1.
        standard_patches = (
            patch('helpers.is_lxc', return_value=False),
            patch('helpers.emit'),
            patch('time.time', side_effect=count(1)),
        )
        for patcher in standard_patches:
            patcher.start()
            self.addCleanup(patcher.stop)
diff --git a/charms/trusty/cassandra/tests/test_actions.py b/charms/trusty/cassandra/tests/test_actions.py
new file mode 100755
index 0000000..f97df0c
--- /dev/null
+++ b/charms/trusty/cassandra/tests/test_actions.py
@@ -0,0 +1,1156 @@
+#!.venv3/bin/python3
+
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import errno
+from itertools import repeat
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+from textwrap import dedent
+import unittest
+from unittest.mock import ANY, call, patch, sentinel
+import yaml
+
+import cassandra
+from charmhelpers.core import hookenv
+
+from tests.base import TestCaseBase
+import actions
+from coordinator import coordinator
+import helpers
+
+
+class TestActions(TestCaseBase):
    def test_action_wrapper(self):
        """The @actions.action decorator logs and strips the service name."""
        @actions.action
        def somefunc(*args, **kw):
            return 42, args, kw

        hookenv.hook_name.return_value = 'catch-fire'

        # The wrapper strips the servicename argument, which we have no
        # use for, logs a message and invokes the wrapped function.
        hookenv.remote_unit.return_value = None
        self.assertEqual(somefunc('sn', 1, foo=4), (42, (1,), dict(foo=4)))
        hookenv.log.assert_called_once_with('** Action catch-fire/somefunc')

        # Different log message if there is a remote unit.
        hookenv.log.reset_mock()
        os.environ['JUJU_REMOTE_UNIT'] = 'foo'
        self.assertEqual(somefunc('sn', 1, foo=4), (42, (1,), dict(foo=4)))
        hookenv.log.assert_called_once_with(
            '** Action catch-fire/somefunc (foo)')
+
    def test_revert_unchangeable_config(self):
        """Changes to UNCHANGEABLE_KEYS after the first hook are reverted."""
        config = hookenv.config()

        self.assertIn('datacenter', actions.UNCHANGEABLE_KEYS)

        # In the first hook, revert does nothing as there is nothing to
        # revert to.
        config['datacenter'] = 'mission_control'
        self.assertTrue(config.changed('datacenter'))
        actions.revert_unchangeable_config('')
        self.assertEqual(config['datacenter'], 'mission_control')

        config.save()
        config.load_previous()
        config['datacenter'] = 'orbital_1'

        actions.revert_unchangeable_config('')
        self.assertEqual(config['datacenter'], 'mission_control')  # Reverted

        hookenv.log.assert_any_call(ANY, hookenv.ERROR)  # Logged the problem.
+
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_leader_only(self, is_leader):
        """@leader_only makes the wrapped function a no-op on non-leaders."""
        @actions.leader_only
        def f(*args, **kw):
            return args, kw

        # Non-leader: wrapped function is skipped and None returned.
        is_leader.return_value = False
        self.assertIsNone(f(1, foo='bar'))

        # Leader: wrapped function runs and its result is passed through.
        is_leader.return_value = True
        self.assertEqual(f(1, foo='bar'), ((1,), dict(foo='bar')))
+
    def test_set_proxy(self):
        """set_proxy copies the http_proxy config item into the environment."""
        # NB. Environment is already mocked.
        os.environ['http_proxy'] = ''
        os.environ['https_proxy'] = ''
        actions.set_proxy('')
        # No config set: environment untouched.
        self.assertEqual(os.environ['http_proxy'], '')
        self.assertEqual(os.environ['https_proxy'], '')
        # http_proxy config sets both http and https proxy variables.
        hookenv.config()['http_proxy'] = 'foo'
        actions.set_proxy('')
        self.assertEqual(os.environ['http_proxy'], 'foo')
        self.assertEqual(os.environ['https_proxy'], 'foo')
+
    @patch('subprocess.check_call')
    def test_preinstall(self, check_call):
        """preinstall runs executable exec.d/*/charm-pre-install hooks.

        Hooks only run during the install hook; non-executable hooks are
        skipped with a warning.
        """
        # Noop if there are no preinstall hooks found running the
        # install hook.
        hookenv.hook_name.return_value = 'install'
        actions.preinstall('')
        self.assertFalse(check_call.called)
        hookenv.log.assert_any_call('No preinstall hooks found')

        # If preinstall hooks are found running the install hook,
        # the preinstall hooks are run.
        hook_dirs = []
        hook_files = []
        for i in range(1, 3):
            hook_dirs.append(os.path.join(hookenv.charm_dir(),
                                          'exec.d', str(i)))
            hook_files.append(os.path.join(hook_dirs[-1], 'charm-pre-install'))

            os.makedirs(hook_dirs[-1])
            with open(hook_files[-1], 'w') as f1:
                print('mocked', file=f1)
            os.chmod(hook_files[-1], 0o755)

        check_call.reset_mock()
        actions.preinstall('')

        calls = [call(['sh', '-c', f2]) for f2 in hook_files]
        check_call.assert_has_calls(calls)

        # If a preinstall hook is not executable, a warning is raised.
        hook_dir = os.path.join(hookenv.charm_dir(), 'exec.d', '55')
        hook_file = os.path.join(hook_dir, 'charm-pre-install')
        os.makedirs(hook_dir)
        with open(hook_file, 'w') as f1:
            print('whoops', file=f1)
        os.chmod(hook_file, 0o644)
        check_call.reset_mock()
        hookenv.log.reset_mock()
        actions.preinstall('')
        check_call.assert_has_calls(calls)  # Only previous hooks run.
        hookenv.log.assert_has_calls([
            call(ANY),
            call(ANY),
            call(ANY, hookenv.WARNING)])

        # Nothing happens if the install hook is not being run.
        hookenv.hook_name.return_value = 'config-changed'
        check_call.reset_mock()
        actions.preinstall('')
        self.assertFalse(check_call.called)
+
    @patch('subprocess.check_call')
    def test_swapoff(self, check_call):
        """swapoff disables swap now and strips swap entries from fstab."""
        fstab = (
            b'UUID=abc / ext4 errors=remount-ro 0 1\n'
            b'/dev/mapper/cryptswap1 none swap sw 0 0')
        with tempfile.NamedTemporaryFile() as f:
            f.write(fstab)
            f.flush()
            actions.swapoff('', f.name)
            f.seek(0)
            # The swap line has been removed from the fstab.
            self.assertTrue(b'swap' not in f.read())

        check_call.assert_called_once_with(['swapoff', '-a'])
+
+ @patch('subprocess.check_call')
+ def test_swapoff_fails(self, check_call):
+ check_call.side_effect = RuntimeError()
+ actions.swapoff('', '')
+ # A warning is generated if swapoff fails.
+ hookenv.log.assert_any_call(ANY, hookenv.WARNING)
+
    @patch('subprocess.check_call')
    def test_swapoff_lxc(self, check_call):
        """Inside an LXC container swapoff does nothing except log."""
        # Under LXC, the swapoff action does nothing except log.
        helpers.is_lxc.return_value = True
        actions.swapoff('')
        self.assertFalse(check_call.called)
+
    @patch('charmhelpers.fetch.configure_sources')
    def test_configure_sources(self, configure_sources):
        """fetch.configure_sources runs on first hook and on config change."""
        config = hookenv.config()

        # fetch.configure_sources called the first time
        actions.configure_sources('')
        configure_sources.assert_called_once_with(True)

        # fetch.configure_sources not called if relevant config is unchanged.
        config.save()
        config.load_previous()
        configure_sources.reset_mock()
        actions.configure_sources('')
        self.assertFalse(configure_sources.called)

        # Changing install_sources causes fetch.configure_sources to be
        # called.
        config.save()
        config.load_previous()
        configure_sources.reset_mock()
        config['install_sources'] = 'foo'
        actions.configure_sources('')
        configure_sources.assert_called_once_with(True)

        # Changing install_keys causes fetch.configure_sources to be
        # called.
        config.save()
        config.load_previous()
        configure_sources.reset_mock()
        config['install_keys'] = 'foo'
        actions.configure_sources('')
        configure_sources.assert_called_once_with(True)
+
    @patch('charmhelpers.core.hookenv.charm_dir')
    @patch('subprocess.check_call')
    def test_add_implicit_package_signing_keys(self, check_call, charm_dir):
        """The bundled apache and datastax keys are handed to apt-key."""
        # Point charm_dir at the real charm tree so the bundled key
        # files under lib/ exist.
        charm_dir.return_value = os.path.join(os.path.dirname(__file__),
                                              os.pardir)
        actions.add_implicit_package_signing_keys('')

        keys = ['apache', 'datastax']

        self.assertEqual(check_call.call_count, len(keys))

        for k in keys:
            with self.subTest(key=k):
                path = os.path.join(hookenv.charm_dir(),
                                    'lib', '{}.key'.format(k))
                self.assertTrue(os.path.exists(path))
                # stdin is redirected from /dev/null so apt-key cannot block.
                check_call.assert_any_call(['apt-key', 'add', path],
                                           stdin=subprocess.DEVNULL)
+
    @patch('charmhelpers.core.host.write_file')
    @patch('subprocess.check_call')
    def test_reset_sysctl(self, check_call, write_file):
        """reset_sysctl writes the Cassandra sysctl drop-in and applies it."""
        actions.reset_sysctl('')

        ctl_file = '/etc/sysctl.d/99-cassandra.conf'
        # Magic value per Cassandra best practice.
        write_file.assert_called_once_with(ctl_file,
                                           b"vm.max_map_count = 131072\n")
        check_call.assert_called_once_with(['sysctl', '-p',
                                            '/etc/sysctl.d/99-cassandra.conf'])
+
    @patch('subprocess.check_call')
    @patch('charmhelpers.core.host.write_file')
    def test_reset_sysctl_expected_fails(self, write_file, check_call):
        """EACCES from sysctl is tolerated and logged as a warning."""
        check_call.side_effect = repeat(OSError(errno.EACCES,
                                                'Permission Denied'))
        actions.reset_sysctl('')
        # A warning is generated if permission denied was raised.
        hookenv.log.assert_any_call(ANY, hookenv.WARNING)
+
    @patch('subprocess.check_call')
    @patch('charmhelpers.core.host.write_file')
    def test_reset_sysctl_fails_badly(self, write_file, check_call):
        """Unexpected OSErrors from sysctl propagate to the caller."""
        # Other OSErrors are reraised since we don't know how to handle
        # them.
        check_call.side_effect = repeat(OSError(errno.EFAULT, 'Whoops'))
        self.assertRaises(OSError, actions.reset_sysctl, '')
+
    @patch('subprocess.check_call')
    def test_reset_sysctl_lxc(self, check_call):
        """Inside an LXC container sysctl is left untouched and logged."""
        helpers.is_lxc.return_value = True
        actions.reset_sysctl('')
        self.assertFalse(check_call.called)
        hookenv.log.assert_any_call('In an LXC. '
                                    'Leaving sysctl unchanged.')
+
+ @patch('helpers.get_cassandra_packages')
+ @patch('helpers.ensure_package_status')
+ def test_ensure_cassandra_package_status(self, ensure_package_status,
+ get_cassandra_packages):
+ get_cassandra_packages.return_value = sentinel.cassandra_packages
+ actions.ensure_cassandra_package_status('')
+ ensure_package_status.assert_called_once_with(
+ sentinel.cassandra_packages)
+
    @patch('subprocess.check_call')
    @patch('helpers.get_jre')
    @patch('helpers.get_cassandra_packages')
    @patch('helpers.install_packages')
    def test_install_cassandra_packages(self, install_packages,
                                        get_cassandra_packages,
                                        get_jre, check_call):
        """With openjdk, packages install and the JRE alternative is set."""
        get_cassandra_packages.return_value = sentinel.cassandra_packages
        get_jre.return_value = 'openjdk'
        actions.install_cassandra_packages('')
        install_packages.assert_called_once_with(sentinel.cassandra_packages)
        check_call.assert_called_once_with(['update-java-alternatives',
                                            '--jre-headless', '--set',
                                            'java-1.8.0-openjdk-amd64'])
+
    @patch('subprocess.check_call')
    @patch('helpers.get_jre')
    @patch('helpers.get_cassandra_packages')
    @patch('helpers.install_packages')
    def test_install_cassandra_packages_oracle(self, install_packages,
                                               get_cassandra_packages,
                                               get_jre, check_call):
        """With the Oracle JRE, no java alternative is selected here."""
        get_cassandra_packages.return_value = sentinel.cassandra_packages
        get_jre.return_value = 'oracle'
        actions.install_cassandra_packages('')
        install_packages.assert_called_once_with(sentinel.cassandra_packages)
        # No alternatives selected, as the Oracle JRE installer method
        # handles this.
        self.assertFalse(check_call.called)
+
    @patch('actions._install_oracle_jre_tarball')
    @patch('actions._fetch_oracle_jre')
    def test_install_oracle_jre(self, fetch, install_tarball):
        """install_oracle_jre fetches and installs only when jre == oracle."""
        fetch.return_value = sentinel.tarball

        # Default JRE: nothing fetched or installed.
        actions.install_oracle_jre('')
        self.assertFalse(fetch.called)
        self.assertFalse(install_tarball.called)

        # Oracle JRE selected: tarball fetched and installed.
        hookenv.config()['jre'] = 'oracle'
        actions.install_oracle_jre('')
        fetch.assert_called_once_with()
        install_tarball.assert_called_once_with(sentinel.tarball)
+
    @patch('helpers.status_set')
    @patch('urllib.request')
    def test_fetch_oracle_jre(self, req, status_set):
        """_fetch_oracle_jre downloads the tarball named by private_jre_url."""
        config = hookenv.config()
        url = 'https://foo.example.com/server-jre-7u42-linux-x64.tar.gz'
        expected_tarball = os.path.join(hookenv.charm_dir(), 'lib',
                                        'server-jre-7u42-linux-x64.tar.gz')
        config['private_jre_url'] = url

        # Create a dummy tarball, since the mock urlretrieve won't.
        os.makedirs(os.path.dirname(expected_tarball))
        with open(expected_tarball, 'w'):
            pass  # Empty file

        self.assertEqual(actions._fetch_oracle_jre(), expected_tarball)
        req.urlretrieve.assert_called_once_with(url, expected_tarball)
+
    def test_fetch_oracle_jre_local(self):
        """A tarball already in lib/ is used without any download."""
        # Create an existing tarball. If it is found, it will be used
        # without needing to specify a remote url or actually download
        # anything.
        expected_tarball = os.path.join(hookenv.charm_dir(), 'lib',
                                        'server-jre-7u42-linux-x64.tar.gz')
        os.makedirs(os.path.dirname(expected_tarball))
        with open(expected_tarball, 'w'):
            pass  # Empty file

        self.assertEqual(actions._fetch_oracle_jre(), expected_tarball)
+
+ @patch('helpers.status_set')
+ def test_fetch_oracle_jre_notfound(self, status_set):
+ with self.assertRaises(SystemExit) as x:
+ actions._fetch_oracle_jre()
+ self.assertEqual(x.code, 0)
+ status_set.assert_called_once_with('blocked', ANY)
+
    @patch('subprocess.check_call')
    @patch('charmhelpers.core.host.mkdir')
    @patch('os.path.isdir')
    def test_install_oracle_jre_tarball(self, isdir, mkdir, check_call):
        """A fresh install unpacks the tarball and registers alternatives."""
        isdir.return_value = False

        dest = '/usr/lib/jvm/java-8-oracle'

        actions._install_oracle_jre_tarball(sentinel.tarball)
        mkdir.assert_called_once_with(dest)
        check_call.assert_has_calls([
            call(['tar', '-xz', '-C', dest,
                  '--strip-components=1', '-f', sentinel.tarball]),
            call(['update-alternatives', '--install',
                  '/usr/bin/java', 'java',
                  os.path.join(dest, 'bin', 'java'), '1']),
            call(['update-alternatives', '--set', 'java',
                  os.path.join(dest, 'bin', 'java')]),
            call(['update-alternatives', '--install',
                  '/usr/bin/javac', 'javac',
                  os.path.join(dest, 'bin', 'javac'), '1']),
            call(['update-alternatives', '--set', 'javac',
                  os.path.join(dest, 'bin', 'javac')])])
+
    @patch('os.path.exists')
    @patch('subprocess.check_call')
    @patch('charmhelpers.core.host.mkdir')
    @patch('os.path.isdir')
    def test_install_oracle_jre_tarball_already(self, isdir,
                                                mkdir, check_call, exists):
        """Reinstalling the same tarball skips extraction but keeps
        the update-alternatives registration."""
        isdir.return_value = True
        exists.return_value = True  # jre already installed

        # Store the version previously installed.
        hookenv.config()['oracle_jre_tarball'] = sentinel.tarball

        dest = '/usr/lib/jvm/java-8-oracle'

        actions._install_oracle_jre_tarball(sentinel.tarball)

        self.assertFalse(mkdir.called)  # The jvm dir already existed.

        exists.assert_called_once_with('/usr/lib/jvm/java-8-oracle/bin/java')

        # update-alternatives done, but tarball not extracted.
        check_call.assert_has_calls([
            call(['update-alternatives', '--install',
                  '/usr/bin/java', 'java',
                  os.path.join(dest, 'bin', 'java'), '1']),
            call(['update-alternatives', '--set', 'java',
                  os.path.join(dest, 'bin', 'java')]),
            call(['update-alternatives', '--install',
                  '/usr/bin/javac', 'javac',
                  os.path.join(dest, 'bin', 'javac'), '1']),
            call(['update-alternatives', '--set', 'javac',
                  os.path.join(dest, 'bin', 'javac')])])
+
    @patch('subprocess.check_output')
    def test_emit_java_version(self, check_output):
        """emit_java_version logs each line of `java -version` output."""
        check_output.return_value = 'Line 1\nLine 2'
        actions.emit_java_version('')
        check_output.assert_called_once_with(['java', '-version'],
                                             universal_newlines=True)
        # Each output line is logged with a 'JRE: ' prefix.
        hookenv.log.assert_has_calls([call(ANY),
                                      call('JRE: Line 1'),
                                      call('JRE: Line 2')])
+
+ @patch('helpers.configure_cassandra_yaml')
+ def test_configure_cassandra_yaml(self, configure_cassandra_yaml):
+ # actions.configure_cassandra_yaml is just a wrapper around the
+ # helper.
+ actions.configure_cassandra_yaml('')
+ configure_cassandra_yaml.assert_called_once_with()
+
    @patch('helpers.get_cassandra_env_file')
    @patch('charmhelpers.core.host.write_file')
    def test_configure_cassandra_env(self, write_file, env_file):
        """configure_cassandra_env rewrites settings in cassandra-env.sh.

        Config overrides replace the commented-out defaults; clearing the
        config item restores the commented-out form.
        """
        def _wf(path, contents, perms=None):
            # Write-through so we can inspect the generated file.
            with open(path, 'wb') as f:
                f.write(contents)
        write_file.side_effect = _wf

        # cassandra-env.sh is a shell script that unfortunately
        # embeds configuration we need to change.
        existing_config = dedent('''\
            Everything is ignored
            unless a regexp matches
            #MAX_HEAP_SIZE="1G"
            #HEAP_NEWSIZE="800M"
            #JMX_PORT="1234"
            And done
            ''')

        with tempfile.TemporaryDirectory() as tempdir:
            cassandra_env = os.path.join(tempdir, 'c.sh')
            env_file.return_value = cassandra_env

            with open(cassandra_env, 'w', encoding='UTF-8') as f:
                f.write(existing_config)

            overrides = dict(
                max_heap_size=re.compile('^MAX_HEAP_SIZE=(.*)$', re.M),
                heap_newsize=re.compile('^HEAP_NEWSIZE=(.*)$', re.M))

            for key in overrides:
                hookenv.config()[key] = ''

            # By default, the settings will be commented out.
            actions.configure_cassandra_env('')
            with open(cassandra_env, 'r', encoding='UTF-8') as f:
                generated_env = f.read()
            for config_key, regexp in overrides.items():
                with self.subTest(override=config_key):
                    self.assertIsNone(regexp.search(generated_env))

            # Settings can be overridden.
            for config_key, regexp in overrides.items():
                hookenv.config()[config_key] = '{} val'.format(config_key)
            actions.configure_cassandra_env('')
            with open(cassandra_env, 'r') as f:
                generated_env = f.read()
            for config_key, regexp in overrides.items():
                with self.subTest(override=config_key):
                    match = regexp.search(generated_env)
                    self.assertIsNotNone(match)
                    # Note the value has been shell quoted.
                    self.assertTrue(
                        match.group(1).startswith(
                            "'{} val'".format(config_key)))

            # Settings can be returned to the defaults.
            for config_key, regexp in overrides.items():
                hookenv.config()[config_key] = ''
            actions.configure_cassandra_env('')
            with open(cassandra_env, 'r', encoding='UTF-8') as f:
                generated_env = f.read()
            for config_key, regexp in overrides.items():
                with self.subTest(override=config_key):
                    self.assertIsNone(regexp.search(generated_env))
+
    @patch('helpers.get_cassandra_rackdc_file')
    def test_configure_cassandra_rackdc(self, rackdc_file):
        """The datacenter and rack config land in cassandra-rackdc.properties."""
        hookenv.config()['datacenter'] = 'test_dc'
        hookenv.config()['rack'] = 'test_rack'
        with tempfile.NamedTemporaryFile() as rackdc:
            rackdc_file.return_value = rackdc.name
            actions.configure_cassandra_rackdc('')
            with open(rackdc.name, 'r') as f:
                self.assertEqual(f.read().strip(),
                                 'dc=test_dc\nrack=test_rack')
+
    @patch('helpers.connect')
    @patch('helpers.get_auth_keyspace_replication')
    @patch('helpers.num_nodes')
    def test_needs_reset_auth_keyspace_replication(self, num_nodes,
                                                   get_auth_ks_rep,
                                                   connect):
        """A reset is needed when the local DC is absent from the
        system_auth keyspace replication settings."""
        num_nodes.return_value = 4
        connect().__enter__.return_value = sentinel.session
        connect().__exit__.return_value = False
        # Replication only lists a foreign DC, so ours is missing.
        get_auth_ks_rep.return_value = {'another': '8'}
        self.assertTrue(actions.needs_reset_auth_keyspace_replication())
+
    @patch('helpers.connect')
    @patch('helpers.get_auth_keyspace_replication')
    @patch('helpers.num_nodes')
    def test_needs_reset_auth_keyspace_replication_false(self, num_nodes,
                                                         get_auth_ks_rep,
                                                         connect):
        """No reset is needed when the local DC's replication factor
        already matches the node count."""
        config = hookenv.config()
        config['datacenter'] = 'mydc'
        connect().__enter__.return_value = sentinel.session
        connect().__exit__.return_value = False

        num_nodes.return_value = 3
        get_auth_ks_rep.return_value = {'another': '8',
                                        'mydc': '3'}
        self.assertFalse(actions.needs_reset_auth_keyspace_replication())
+
    @patch('helpers.set_active')
    @patch('helpers.repair_auth_keyspace')
    @patch('helpers.connect')
    @patch('helpers.set_auth_keyspace_replication')
    @patch('helpers.get_auth_keyspace_replication')
    @patch('helpers.num_nodes')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_reset_auth_keyspace_replication(self, is_leader, num_nodes,
                                             get_auth_ks_rep,
                                             set_auth_ks_rep,
                                             connect, repair, set_active):
        """The leader, holding the repair lock, updates system_auth
        replication to include the local DC, repairs, and goes active."""
        is_leader.return_value = True
        num_nodes.return_value = 4
        # Grant ourselves the coordinator 'repair' lock.
        coordinator.grants = {}
        coordinator.requests = {hookenv.local_unit(): {}}
        coordinator.grant('repair', hookenv.local_unit())
        config = hookenv.config()
        config['datacenter'] = 'mydc'
        connect().__enter__.return_value = sentinel.session
        connect().__exit__.return_value = False
        get_auth_ks_rep.return_value = {'another': '8'}
        self.assertTrue(actions.needs_reset_auth_keyspace_replication())
        actions.reset_auth_keyspace_replication('')
        # Existing foreign DC settings are preserved; ours is added with
        # a replication factor equal to the node count.
        set_auth_ks_rep.assert_called_once_with(
            sentinel.session,
            {'class': 'NetworkTopologyStrategy', 'another': '8', 'mydc': 4})
        repair.assert_called_once_with()
        set_active.assert_called_once_with()
+
+ def test_store_unit_private_ip(self):
+ hookenv.unit_private_ip.side_effect = None
+ hookenv.unit_private_ip.return_value = sentinel.ip
+ actions.store_unit_private_ip('')
+ self.assertEqual(hookenv.config()['unit_private_ip'], sentinel.ip)
+
    @patch('charmhelpers.core.host.service_start')
    @patch('helpers.status_set')
    @patch('helpers.actual_seed_ips')
    @patch('helpers.get_seed_ips')
    @patch('relations.StorageRelation.needs_remount')
    @patch('helpers.is_bootstrapped')
    @patch('helpers.is_cassandra_running')
    @patch('helpers.is_decommissioned')
    def test_needs_restart(self, is_decom, is_running, is_bootstrapped,
                           needs_remount, seed_ips, actual_seeds,
                           status_set, service_start):
        """needs_restart is True for stopped nodes, remounts, restart-
        requiring config changes, IP changes and seed list changes."""
        # Baseline: healthy running node, nothing changed.
        is_decom.return_value = False
        is_running.return_value = True
        needs_remount.return_value = False
        seed_ips.return_value = set(['1.2.3.4'])
        actual_seeds.return_value = set(['1.2.3.4'])

        config = hookenv.config()
        config['configured_seeds'] = list(sorted(seed_ips()))
        config.save()
        config.load_previous()  # Ensure everything flagged as unchanged.

        self.assertFalse(actions.needs_restart())

        # Decommissioned nodes are not restarted.
        is_decom.return_value = True
        self.assertFalse(actions.needs_restart())
        is_decom.return_value = False
        self.assertFalse(actions.needs_restart())

        # Nodes not running need to be restarted.
        is_running.return_value = False
        self.assertTrue(actions.needs_restart())
        is_running.return_value = True
        self.assertFalse(actions.needs_restart())

        # If we have a new mountpoint, we need to restart in order to
        # migrate data.
        needs_remount.return_value = True
        self.assertTrue(actions.needs_restart())
        needs_remount.return_value = False
        self.assertFalse(actions.needs_restart())

        # Certain changed config items trigger a restart.
        config['max_heap_size'] = '512M'
        self.assertTrue(actions.needs_restart())
        config.save()
        config.load_previous()
        self.assertFalse(actions.needs_restart())

        # A new IP address requires a restart.
        config['unit_private_ip'] = 'new'
        self.assertTrue(actions.needs_restart())
        config.save()
        config.load_previous()
        self.assertFalse(actions.needs_restart())

        # If the seeds have changed, we need to restart.
        seed_ips.return_value = set(['9.8.7.6'])
        actual_seeds.return_value = set(['9.8.7.6'])
        self.assertTrue(actions.needs_restart())
        # Restarting (stop -> start) records the new seeds, clearing the
        # pending restart.
        is_running.side_effect = iter([False, True])
        helpers.start_cassandra()
        is_running.side_effect = None
        is_running.return_value = True
        self.assertFalse(actions.needs_restart())
+
    @patch('charmhelpers.core.hookenv.is_leader')
    @patch('helpers.is_bootstrapped')
    @patch('helpers.ensure_database_directories')
    @patch('helpers.remount_cassandra')
    @patch('helpers.start_cassandra')
    @patch('helpers.stop_cassandra')
    @patch('helpers.status_set')
    def test_maybe_restart(self, status_set, stop_cassandra, start_cassandra,
                           remount, ensure_directories, is_bootstrapped,
                           is_leader):
        """Holding the 'restart' lock, maybe_restart stops the node,
        remounts storage, ensures directories exist, then starts it."""
        coordinator.grants = {}
        coordinator.requests = {hookenv.local_unit(): {}}
        coordinator.relid = 'cluster:1'
        coordinator.grant('restart', hookenv.local_unit())
        actions.maybe_restart('')
        stop_cassandra.assert_called_once_with()
        remount.assert_called_once_with()
        ensure_directories.assert_called_once_with()
        start_cassandra.assert_called_once_with()
+
+ @patch('helpers.stop_cassandra')
+ def test_stop_cassandra(self, helpers_stop_cassandra):
+ actions.stop_cassandra('ignored')
+ helpers_stop_cassandra.assert_called_once_with()
+
+ @patch('helpers.start_cassandra')
+ def test_start_cassandra(self, helpers_start_cassandra):
+ actions.start_cassandra('ignored')
+ helpers_start_cassandra.assert_called_once_with()
+
    @patch('os.path.isdir')
    @patch('helpers.get_all_database_directories')
    @patch('helpers.set_io_scheduler')
    def test_reset_all_io_schedulers(self, set_io_scheduler, dbdirs, isdir):
        """The configured io_scheduler is applied to every database dir."""
        hookenv.config()['io_scheduler'] = sentinel.io_scheduler
        dbdirs.return_value = dict(
            data_file_directories=[sentinel.d1, sentinel.d2],
            commitlog_directory=sentinel.cl,
            saved_caches_directory=sentinel.sc)
        isdir.return_value = True
        actions.reset_all_io_schedulers('')
        set_io_scheduler.assert_has_calls([
            call(sentinel.io_scheduler, sentinel.d1),
            call(sentinel.io_scheduler, sentinel.d2),
            call(sentinel.io_scheduler, sentinel.cl),
            call(sentinel.io_scheduler, sentinel.sc)],
            any_order=True)

        # If directories don't exist yet, nothing happens.
        set_io_scheduler.reset_mock()
        isdir.return_value = False
        actions.reset_all_io_schedulers('')
        self.assertFalse(set_io_scheduler.called)
+
    def test_config_key_lists_complete(self):
        """Every config.yaml option must be categorized in the actions
        module's restart-classification sets."""
        # Ensure that we have listed all keys in either
        # RESTART_REQUIRED_KEYS, RESTART_NOT_REQUIRED_KEYS or
        # UNCHANGEABLE_KEYS. This is to ensure that RESTART_REQUIRED_KEYS
        # is maintained as new config items are added over time.
        config_path = os.path.join(os.path.dirname(__file__), os.pardir,
                                   'config.yaml')
        with open(config_path, 'r') as f:
            config = yaml.safe_load(f)

        combined = actions.RESTART_REQUIRED_KEYS.union(
            actions.RESTART_NOT_REQUIRED_KEYS).union(
            actions.UNCHANGEABLE_KEYS)

        for key in config['options']:
            with self.subTest(key=key):
                self.assertIn(key, combined)
+
+ @patch('actions._publish_database_relation')
+ def test_publish_database_relations(self, publish_db_rel):
+ actions.publish_database_relations('')
+ publish_db_rel.assert_called_once_with('database:1', superuser=False)
+
+ @patch('actions._publish_database_relation')
+ def test_publish_database_admin_relations(self, publish_db_rel):
+ actions.publish_database_admin_relations('')
+ publish_db_rel.assert_called_once_with('database-admin:1',
+ superuser=True)
+
    @patch('helpers.leader_ping')
    @patch('helpers.ensure_user')
    @patch('helpers.connect')
    @patch('helpers.get_service_name')
    @patch('helpers.encrypt_password')
    @patch('charmhelpers.core.host.pwgen')
    @patch('charmhelpers.core.hookenv.is_leader')
    @patch('actions._client_credentials')
    def test_publish_database_relation_leader(self, client_creds, is_leader,
                                              pwgen, encrypt_password,
                                              get_service_name,
                                              connect, ensure_user,
                                              leader_ping):
        """As leader with no published creds, a non-superuser account is
        created, peers are pinged and connection details published."""
        is_leader.return_value = True  # We are the leader.
        client_creds.return_value = (None, None)  # No creds published yet.

        get_service_name.return_value = 'cservice'
        pwgen.side_effect = iter(['secret1', 'secret2'])
        encrypt_password.side_effect = iter(['crypt1', 'crypt2'])
        connect().__enter__.return_value = sentinel.session

        config = hookenv.config()
        config['native_transport_port'] = 666
        config['rpc_port'] = 777
        config['cluster_name'] = 'fred'
        config['datacenter'] = 'mission_control'
        config['rack'] = '01'

        actions._publish_database_relation('database:1', superuser=False)

        # Account name is derived from the client service name; the
        # encrypted password is stored, superuser=False.
        ensure_user.assert_called_once_with(sentinel.session,
                                            'juju_cservice', 'crypt1',
                                            False)
        leader_ping.assert_called_once_with()  # Peers woken.

        hookenv.relation_set.assert_has_calls([
            call('database:1',
                 username='juju_cservice', password='secret1',
                 host='10.30.0.1', native_transport_port=666, rpc_port=777,
                 cluster_name='fred', datacenter='mission_control',
                 rack='01')])
+
    @patch('helpers.leader_ping')
    @patch('helpers.ensure_user')
    @patch('helpers.connect')
    @patch('helpers.get_service_name')
    @patch('helpers.encrypt_password')
    @patch('charmhelpers.core.host.pwgen')
    @patch('charmhelpers.core.hookenv.is_leader')
    @patch('actions._client_credentials')
    def test_publish_database_relation_super(self, client_creds, is_leader,
                                             pwgen, encrypt_password,
                                             get_service_name,
                                             connect, ensure_user,
                                             leader_ping):
        """superuser=True creates an '_admin' suffixed superuser account."""
        is_leader.return_value = True  # We are the leader.
        client_creds.return_value = (None, None)  # No creds published yet.

        get_service_name.return_value = 'cservice'
        pwgen.side_effect = iter(['secret1', 'secret2'])
        encrypt_password.side_effect = iter(['crypt1', 'crypt2'])
        connect().__enter__.return_value = sentinel.session

        config = hookenv.config()
        config['native_transport_port'] = 666
        config['rpc_port'] = 777
        config['cluster_name'] = 'fred'
        config['datacenter'] = 'mission_control'
        config['rack'] = '01'

        actions._publish_database_relation('database:1', superuser=True)

        ensure_user.assert_called_once_with(sentinel.session,
                                            'juju_cservice_admin', 'crypt1',
                                            True)
+
    @patch('charmhelpers.core.host.write_file')
    def test_install_maintenance_crontab(self, write_file):
        """The weekly repair cron job is spread across units by number."""
        # First 7 units get distributed, one job per day.
        hookenv.local_unit.return_value = 'foo/0'
        actions.install_maintenance_crontab('')
        write_file.assert_called_once_with('/etc/cron.d/cassandra-maintenance',
                                           ANY)
        contents = write_file.call_args[0][1]
        # Not the complete command, but includes all the expanded
        # variables.
        expected = (b'\n0 0 * * 0 cassandra run-one-until-success '
                    b'nodetool repair -pr')
        self.assertIn(expected, contents)

        # Next 7 units distributed 12 hours out of sync with the first
        # batch.
        hookenv.local_unit.return_value = 'foo/8'
        actions.install_maintenance_crontab('')
        contents = write_file.call_args[0][1]
        expected = (b'\n0 12 * * 1 cassandra run-one-until-success '
                    b'nodetool repair -pr')
        self.assertIn(expected, contents)

        # Later units per helpers.week_spread()
        hookenv.local_unit.return_value = 'foo/411'
        actions.install_maintenance_crontab('')
        contents = write_file.call_args[0][1]
        expected = (b'\n37 8 * * 5 cassandra run-one-until-success '
                    b'nodetool repair -pr')
        self.assertIn(expected, contents)
+
+ @patch('helpers.emit_netstats')
+ @patch('helpers.emit_status')
+ @patch('helpers.emit_describe_cluster')
+ def test_emit_cluster_info(self, emit_desc, emit_status, emit_netstats):
+ actions.emit_cluster_info('')
+ emit_desc.assert_called_once_with()
+ emit_status.assert_called_once_with()
+ emit_netstats.assert_called_once_with()
+
    @patch('charmhelpers.core.hookenv.relations_of_type')
    @patch('actions.ufw')
    def test_configure_firewall(self, ufw, rel_of_type):
        """configure_firewall() opens, grants and revokes the right ports."""
        # One peer ('cluster') relation with two peers, no client
        # ('database-admin') relations.
        rel_of_type.side_effect = iter([[{'private-address': '1.1.0.1'},
                                         {'private-address': '1.1.0.2'}],
                                        []])
        actions.configure_firewall('')

        # Confirm our mock provided the expected data.
        rel_of_type.assert_has_calls([call('cluster'), call('database-admin')])

        ufw.enable.assert_called_once_with(soft_fail=True)  # Always enabled.

        # SSH and the client protocol ports are always fully open.
        ufw.service.assert_has_calls([call('ssh', 'open'),
                                      call('nrpe', 'open'),
                                      call('rsync', 'open'),
                                      call(9042, 'open'),
                                      call(9160, 'open')])

        # This test is running for the first time, so there are no
        # previously applied rules to remove. It opens necessary access
        # to peers and other related units. The 1.1.* addresses are
        # peers, and they get storage (7000), ssl_storage (7001),
        # JMX (7199), Thrift (9160) and native (9042). The remaining
        # addresses are clients, getting just Thrift and native.
        ufw.grant_access.assert_has_calls([call('1.1.0.1', 'any', 7000),
                                           call('1.1.0.1', 'any', 7001),
                                           call('1.1.0.2', 'any', 7000),
                                           call('1.1.0.2', 'any', 7001)],
                                          any_order=True)

        # If things change in a later hook, unwanted rules are removed
        # and new ones added.
        config = hookenv.config()
        config.save()
        config.load_previous()
        config['native_transport_port'] = 7777  # 9042 -> 7777
        config['storage_port'] = 7002  # 7000 -> 7002
        config['open_client_ports'] = True
        ufw.reset_mock()

        # Now no peers, two client relations.
        rel_of_type.side_effect = iter([[],
                                        [{'private-address': '1.1.0.1'},
                                         {'private-address': '1.1.0.2'}]])
        actions.configure_firewall('')

        # Three ports now globally open. Yes, having the globally open
        # native and Thrift ports does make the later more specific
        # rules meaningless, but we add the specific rules anyway.
        ufw.service.assert_has_calls([call('ssh', 'open'),
                                      call('nrpe', 'open'),
                                      call(9042, 'close'),
                                      call(7777, 'open'),
                                      call(9160, 'open')], any_order=True)
        ufw.revoke_access.assert_has_calls([call('1.1.0.1', 'any', 7000),
                                            call('1.1.0.2', 'any', 7000)],
                                           any_order=True)
        ufw.grant_access.assert_has_calls([call('1.1.0.1', 'any', 7001),
                                           call('1.1.0.1', 'any', 7002),
                                           call('1.1.0.2', 'any', 7001),
                                           call('1.1.0.2', 'any', 7002)],
                                          any_order=True)
+
    @patch('helpers.mountpoint')
    @patch('helpers.get_cassandra_version')
    @patch('charmhelpers.core.host.write_file')
    @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
    @patch('helpers.local_plugins_dir')
    def test_nrpe_external_master_relation(self, local_plugins_dir, nrpe,
                                           write_file, cassandra_version,
                                           mountpoint):
        """Nagios plugins are installed and NRPE checks registered."""
        mountpoint.side_effect = os.path.dirname
        cassandra_version.return_value = '2.2'
        # The fake charm_dir() needs populating.
        plugin_src_dir = os.path.join(os.path.dirname(__file__),
                                      os.pardir, 'files')
        shutil.copytree(plugin_src_dir,
                        os.path.join(hookenv.charm_dir(), 'files'))

        with tempfile.TemporaryDirectory() as d:
            local_plugins_dir.return_value = d
            actions.nrpe_external_master_relation('')

            # The expected file was written to the expected filename
            # with required perms.
            with open(os.path.join(plugin_src_dir, 'check_cassandra_heap.sh'),
                      'rb') as f:
                write_file.assert_called_once_with(
                    os.path.join(d, 'check_cassandra_heap.sh'), f.read(),
                    perms=0o555)

        # Both the heap and the per-mountpoint disk checks were added.
        nrpe().add_check.assert_has_calls([
            call(shortname='cassandra_heap',
                 description='Check Cassandra Heap',
                 check_cmd='check_cassandra_heap.sh localhost 80 90'),
            call(description=('Check Cassandra Disk '
                              '/var/lib/cassandra'),
                 shortname='cassandra_disk_var_lib_cassandra',
                 check_cmd=('check_disk -u GB -w 50% -c 25% -K 5% '
                            '-p /var/lib/cassandra'))],
            any_order=True)

        nrpe().write.assert_called_once_with()
+
+ @patch('helpers.get_cassandra_version')
+ @patch('charmhelpers.core.host.write_file')
+ @patch('os.path.exists')
+ @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
+ def test_nrpe_external_master_relation_no_local(self, nrpe, exists,
+ write_file, ver):
+ ver.return_value = '2.2'
+ # If the local plugins directory doesn't exist, we don't attempt
+ # to write files to it. Wait until the subordinate has set it
+ # up.
+ exists.return_value = False
+ actions.nrpe_external_master_relation('')
+ self.assertFalse(write_file.called)
+
    @patch('helpers.mountpoint')
    @patch('helpers.get_cassandra_version')
    @patch('os.path.exists')
    @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
    def test_nrpe_external_master_relation_disable_heapchk(self, nrpe, exists,
                                                           ver, mountpoint):
        """Setting a heap-check threshold to 0 disables the heap check."""
        ver.return_value = '2.2'
        exists.return_value = False
        mountpoint.side_effect = os.path.dirname

        # Disable our checks
        config = hookenv.config()
        config['nagios_heapchk_warn_pct'] = 0  # Only one needs to be disabled.
        config['nagios_heapchk_crit_pct'] = 90

        actions.nrpe_external_master_relation('')
        exists.assert_called_once_with(helpers.local_plugins_dir())

        # Only the disk check remains registered.
        nrpe().add_check.assert_has_calls([
            call(shortname='cassandra_disk_var_lib_cassandra',
                 description=ANY, check_cmd=ANY)], any_order=True)
+
+ @patch('helpers.get_cassandra_version')
+ @patch('os.path.exists')
+ @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
+ def test_nrpe_external_master_relation_disable_diskchk(self, nrpe,
+ exists, ver):
+ ver.return_value = '2.2'
+ exists.return_value = False
+
+ # Disable our checks
+ config = hookenv.config()
+ config['nagios_disk_warn_pct'] = 0 # Only one needs to be disabled.
+ config['magios_disk_crit_pct'] = 50
+
+ actions.nrpe_external_master_relation('')
+ exists.assert_called_once_with(helpers.local_plugins_dir())
+
+ nrpe().add_check.assert_called_once_with(shortname='cassandra_heap',
+ description=ANY,
+ check_cmd=ANY)
+
    @patch('helpers.get_bootstrapped_ips')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.hookenv.leader_set')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_maintain_seeds(self, is_leader, leader_set,
                            seed_ips, bootstrapped_ips):
        """The leader publishes (up to) three bootstrapped nodes as seeds."""
        is_leader.return_value = True

        seed_ips.return_value = set(['1.2.3.4'])
        bootstrapped_ips.return_value = set(['2.2.3.4', '3.2.3.4',
                                             '4.2.3.4', '5.2.3.4'])

        actions.maintain_seeds('')
        leader_set.assert_called_once_with(seeds='2.2.3.4,3.2.3.4,4.2.3.4')
+
    @patch('helpers.get_bootstrapped_ips')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.hookenv.leader_set')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_maintain_seeds_start(self, is_leader, leader_set,
                                  seed_ips, bootstrapped_ips):
        """With no nodes bootstrapped yet, the leader seeds itself."""
        seed_ips.return_value = set()
        bootstrapped_ips.return_value = set()
        actions.maintain_seeds('')
        # First seed is the first leader, which lets it get everything
        # started.
        leader_set.assert_called_once_with(seeds=hookenv.unit_private_ip())
+
    @patch('charmhelpers.core.host.pwgen')
    @patch('helpers.query')
    @patch('helpers.set_unit_superusers')
    @patch('helpers.ensure_user')
    @patch('helpers.encrypt_password')
    @patch('helpers.superuser_credentials')
    @patch('helpers.connect')
    @patch('charmhelpers.core.hookenv.is_leader')
    @patch('charmhelpers.core.hookenv.leader_set')
    @patch('charmhelpers.core.hookenv.leader_get')
    def test_reset_default_password(self, leader_get, leader_set, is_leader,
                                    connect, sup_creds, encrypt_password,
                                    ensure_user, set_sups, query, pwgen):
        """The leader replaces the well-known default superuser password."""
        is_leader.return_value = True
        leader_get.return_value = None  # Password not reset yet.
        connect().__enter__.return_value = sentinel.session
        connect().__exit__.return_value = False
        connect.reset_mock()  # Discard the calls made during setup above.

        sup_creds.return_value = (sentinel.username, sentinel.password)
        encrypt_password.return_value = sentinel.pwhash
        pwgen.return_value = sentinel.random_password

        actions.reset_default_password('')

        # First, a superuser account for the unit was created.
        connect.assert_called_once_with('cassandra', 'cassandra',
                                        timeout=120, auth_timeout=120)
        encrypt_password.assert_called_once_with(sentinel.password)
        ensure_user.assert_called_once_with(sentinel.session,
                                            sentinel.username,
                                            sentinel.pwhash,
                                            superuser=True)
        set_sups.assert_called_once_with([hookenv.local_unit()])

        # After that, the default password is reset.
        query.assert_called_once_with(sentinel.session,
                                      'ALTER USER cassandra WITH PASSWORD %s',
                                      cassandra.ConsistencyLevel.ALL,
                                      (sentinel.random_password,))

        # Flag stored to avoid attempting this again.
        leader_set.assert_called_once_with(default_admin_password_changed=True)
+
+ @patch('helpers.connect')
+ @patch('charmhelpers.core.hookenv.is_leader')
+ @patch('charmhelpers.core.hookenv.leader_get')
+ def test_reset_default_password_noop(self, leader_get, is_leader, connect):
+ leader_get.return_value = True
+ is_leader.return_value = True
+ actions.reset_default_password('') # noop
+ self.assertFalse(connect.called)
+
    @patch('helpers.get_seed_ips')
    @patch('helpers.status_set')
    @patch('charmhelpers.core.hookenv.status_get')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_set_active(self, is_leader, status_get, status_set, seed_ips):
        """A non-leader, non-seed unit reports itself as a live node."""
        is_leader.return_value = False
        status_get.return_value = ('waiting', '')
        seed_ips.return_value = set()
        actions.set_active('')
        status_set.assert_called_once_with('active', 'Live node')
+
    @patch('helpers.get_seed_ips')
    @patch('helpers.status_set')
    @patch('charmhelpers.core.hookenv.status_get')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_set_active_seed(self, is_leader,
                             status_get, status_set, seed_ips):
        """A unit whose IP is in the seed list reports itself as a seed."""
        is_leader.return_value = False
        status_get.return_value = ('waiting', '')
        seed_ips.return_value = set([hookenv.unit_private_ip()])
        actions.set_active('')
        status_set.assert_called_once_with('active', 'Live seed')
+
    @patch('helpers.num_nodes')
    @patch('helpers.get_seed_ips')
    @patch('helpers.service_status_set')
    @patch('helpers.status_set')
    @patch('charmhelpers.core.hookenv.status_get')
    @patch('charmhelpers.core.hookenv.is_leader')
    def test_set_active_service(self, is_leader,
                                status_get, status_set, service_status_set,
                                seed_ips, num_nodes):
        """The leader also sets the service status with the cluster size."""
        status_get.return_value = ('waiting', '')
        is_leader.return_value = True
        seed_ips.return_value = set([hookenv.unit_private_ip()])
        num_nodes.return_value = 1
        actions.set_active('')
        service_status_set.assert_called_once_with('active',
                                                   'Single node cluster')

        # Larger clusters report their node count.
        service_status_set.reset_mock()
        num_nodes.return_value = 6
        actions.set_active('')
        service_status_set.assert_called_once_with('active',
                                                   '6 node cluster')
+
    @patch('helpers.encrypt_password')
    @patch('helpers.superuser_credentials')
    @patch('helpers.peer_relid')
    def test_request_unit_superuser(self, peer_relid, sup_creds, crypt):
        """The unit publishes its desired credentials on the peer relation."""
        peer_relid.return_value = sentinel.peer_relid
        sup_creds.return_value = (sentinel.username, sentinel.password)
        crypt.return_value = sentinel.pwhash
        hookenv.relation_get.return_value = dict()  # No request made yet.
        actions.request_unit_superuser('')
        hookenv.relation_set.assert_called_once_with(
            sentinel.peer_relid,
            username=sentinel.username, pwhash=sentinel.pwhash)
+
+ @patch('helpers.update_hosts_file')
+ @patch('socket.gethostname')
+ def test_update_etc_hosts(self, gethostname, update_hosts_file):
+ gethostname.return_value = sentinel.hostname
+ actions.update_etc_hosts('')
+ update_hosts_file.assert_called_once_with(
+ '/etc/hosts', {'10.20.0.1': sentinel.hostname})
+
+
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_definitions.py b/charms/trusty/cassandra/tests/test_definitions.py
new file mode 100755
index 0000000..98103c0
--- /dev/null
+++ b/charms/trusty/cassandra/tests/test_definitions.py
@@ -0,0 +1,104 @@
+#!.venv3/bin/python3
+
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from itertools import chain
+import functools
+import unittest
+from unittest.mock import patch
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.services import ServiceManager
+
+from tests.base import TestCaseBase
+
+import definitions
+
+
# Make every @patch in this module use autospec, so mocked callables
# reject calls that do not match the real signature.
patch = functools.partial(patch, autospec=True)
+
+
class TestDefinitions(TestCaseBase):
    """Tests for the charm's service definitions and readiness checks."""

    def test_get_service_definitions(self):
        """get_service_definitions() returns a list of dicts."""
        # We can't really test this in unit tests, but at least we can
        # ensure the basic data structure is returned and accepted.
        defs = definitions.get_service_definitions()
        self.assertIsInstance(defs, list)
        for d in defs:
            with self.subTest(d=d):
                self.assertIsInstance(d, dict)

    def test_get_service_definitions_open_ports(self):
        """The definitions open exactly the four configured ports."""
        config = hookenv.config()
        defs = definitions.get_service_definitions()
        expected_ports = set([config['rpc_port'],
                              config['native_transport_port'],
                              config['storage_port'],
                              config['ssl_storage_port']])
        opened_ports = set(chain(*(d.get('ports', []) for d in defs)))
        self.assertSetEqual(opened_ports, expected_ports)

    def test_get_service_manager(self):
        """get_service_manager() wraps the definitions in a ServiceManager."""
        self.assertIsInstance(definitions.get_service_manager(),
                              ServiceManager)

    @patch('helpers.get_unit_superusers')
    @patch('helpers.is_decommissioned')
    @patch('helpers.is_cassandra_running')
    def test_requires_live_node(self, is_running, is_decommissioned, get_sup):
        """RequiresLiveNode is truthy when all liveness conditions hold."""
        is_decommissioned.return_value = False  # Is not decommissioned.
        is_running.return_value = True  # Is running.
        get_sup.return_value = set([hookenv.local_unit()])  # Creds exist.

        self.assertTrue(bool(definitions.RequiresLiveNode()))

    @patch('helpers.get_unit_superusers')
    @patch('helpers.is_decommissioned')
    @patch('helpers.is_cassandra_running')
    def test_requires_live_node_decommissioned(self, is_running,
                                               is_decommissioned, get_sup):
        """A decommissioned node is not live."""
        is_decommissioned.return_value = True  # Is decommissioned.
        is_running.return_value = True  # Is running.
        get_sup.return_value = set([hookenv.local_unit()])  # Creds exist.

        self.assertFalse(bool(definitions.RequiresLiveNode()))

    @patch('helpers.get_unit_superusers')
    @patch('helpers.is_decommissioned')
    @patch('helpers.is_cassandra_running')
    def test_requires_live_node_down(self, is_running,
                                     is_decommissioned, get_sup):
        """A stopped node is not live."""
        is_decommissioned.return_value = False  # Is not decommissioned.
        is_running.return_value = False  # Is not running.
        get_sup.return_value = set([hookenv.local_unit()])  # Creds exist.

        self.assertFalse(bool(definitions.RequiresLiveNode()))

    @patch('helpers.get_unit_superusers')
    @patch('helpers.is_decommissioned')
    @patch('helpers.is_cassandra_running')
    def test_requires_live_node_creds(self, is_running,
                                      is_decommissioned, get_sup):
        """A node without unit superuser credentials is not live."""
        is_decommissioned.return_value = False  # Is not decommissioned.
        is_running.return_value = True  # Is running.
        get_sup.return_value = set()  # Creds do not exist.

        self.assertFalse(bool(definitions.RequiresLiveNode()))
+
+
# Allow running this test module directly, outside the test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_helpers.py b/charms/trusty/cassandra/tests/test_helpers.py
new file mode 100755
index 0000000..92fa1e8
--- /dev/null
+++ b/charms/trusty/cassandra/tests/test_helpers.py
@@ -0,0 +1,1466 @@
+#!.venv3/bin/python3
+
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from collections import namedtuple
+import errno
+import functools
+from itertools import repeat
+import os.path
+import subprocess
+import tempfile
+from textwrap import dedent
+import unittest
+from unittest.mock import ANY, call, MagicMock, patch, sentinel
+
+from cassandra import AuthenticationFailed, ConsistencyLevel
+from cassandra.cluster import NoHostAvailable
+import yaml
+
+from charmhelpers import fetch
+from charmhelpers.core import hookenv, host
+
+from tests.base import TestCaseBase
+import helpers
+
+
# Make every @patch in this module use autospec, so mocked callables
# reject calls that do not match the real signature.
patch = functools.partial(patch, autospec=True)
+
+
+class TestHelpers(TestCaseBase):
    @patch('time.sleep')
    def test_backoff(self, sleep):
        """backoff() sleeps with exponentially growing, capped pauses."""
        # Default cap is 60 seconds.
        i = 0
        for _ in helpers.backoff('foo to bar'):
            i += 1
            if i == 10:
                break
        sleep.assert_has_calls([
            call(2), call(4), call(8), call(16), call(32),
            call(60), call(60), call(60), call(60)])

        # An explicit max_pause caps the pause sooner.
        i = 0
        for _ in helpers.backoff('foo to bar', max_pause=10):
            i += 1
            if i == 10:
                break
        sleep.assert_has_calls([
            call(2), call(4), call(8), call(10), call(10),
            call(10), call(10), call(10), call(10)])
+
    def test_autostart_disabled(self):
        """autostart_disabled() installs a policy-rc.d blocking all starts."""
        with tempfile.TemporaryDirectory() as tmpdir:

            prc = os.path.join(tmpdir, 'policy-rc.d')
            prc_backup = prc + '-orig'

            with helpers.autostart_disabled(_policy_rc=prc):
                # No existing policy-rc.d, so no backup made.
                self.assertFalse(os.path.exists(prc_backup))

                # A policy-rc.d file has been created that will disable
                # package autostart per spec (ie. returns a 101 exit code).
                self.assertTrue(os.path.exists(prc))
                self.assertEqual(subprocess.call([prc]), 101)

                with helpers.autostart_disabled(_policy_rc=prc):
                    # A second time, we have a backup made.
                    # policy-rc.d still works
                    self.assertTrue(os.path.exists(prc_backup))
                    self.assertEqual(subprocess.call([prc]), 101)

                # Backup removed, and policy-rc.d still works.
                self.assertFalse(os.path.exists(prc_backup))
                self.assertEqual(subprocess.call([prc]), 101)

            # Neither backup nor policy-rc.d exist now we are out of the
            # context manager.
            self.assertFalse(os.path.exists(prc_backup))
            self.assertFalse(os.path.exists(prc))
+
    def test_autostart_disabled_partial(self):
        """autostart_disabled() can block only the named services."""
        with tempfile.TemporaryDirectory() as tmpdir:

            prc = os.path.join(tmpdir, 'policy-rc.d')
            prc_backup = prc + '-orig'

            with helpers.autostart_disabled(['foo', 'bar'], _policy_rc=prc):
                # No existing policy-rc.d, so no backup made.
                self.assertFalse(os.path.exists(prc_backup))

                # A policy-rc.d file has been created that will disable
                # package autostart per spec (ie. returns a 101 exit code).
                self.assertTrue(os.path.exists(prc))
                self.assertEqual(subprocess.call([prc, 'foo']), 101)
                self.assertEqual(subprocess.call([prc, 'bar']), 101)
                self.assertEqual(subprocess.call([prc, 'baz']), 0)

            # Neither backup nor policy-rc.d exist now we are out of the
            # context manager.
            self.assertFalse(os.path.exists(prc_backup))
            self.assertFalse(os.path.exists(prc))
+
    @patch('helpers.autostart_disabled')
    @patch('charmhelpers.fetch.apt_install')
    def test_install_packages(self, apt_install, autostart_disabled):
        """install_packages() apt-installs with service autostart blocked."""
        packages = ['a_pack', 'b_pack']
        helpers.install_packages(packages)

        # All packages got installed, and hook aborted if package
        # installation failed.
        apt_install.assert_called_once_with(['a_pack', 'b_pack'], fatal=True)

        # The autostart_disabled context manager was used to stop
        # package installation starting services.
        autostart_disabled().__enter__.assert_called_once_with()
        autostart_disabled().__exit__.assert_called_once_with(None, None, None)
+
    @patch('helpers.autostart_disabled')
    @patch('charmhelpers.fetch.apt_install')
    def test_install_packages_extras(self, apt_install, autostart_disabled):
        """The extra_packages config item is appended to the install list."""
        packages = ['a_pack', 'b_pack']
        hookenv.config()['extra_packages'] = 'c_pack d_pack'
        helpers.install_packages(packages)

        # All packages got installed, and hook aborted if package
        # installation failed.
        apt_install.assert_called_once_with(['a_pack', 'b_pack',
                                             'c_pack', 'd_pack'], fatal=True)

        # The autostart_disabled context manager was used to stop
        # package installation starting services.
        autostart_disabled().__enter__.assert_called_once_with()
        autostart_disabled().__exit__.assert_called_once_with(None, None, None)
+
    @patch('helpers.autostart_disabled')
    @patch('charmhelpers.fetch.apt_install')
    def test_install_packages_noop(self, apt_install, autostart_disabled):
        """install_packages() does nothing if everything is installed."""
        # Everything is already installed. Nothing to do.
        fetch.filter_installed_packages.side_effect = lambda pkgs: []

        packages = ['a_pack', 'b_pack']
        hookenv.config()['extra_packages'] = 'c_pack d_pack'
        helpers.install_packages(packages)

        # apt_install was never invoked.
        self.assertFalse(apt_install.called)

        # Autostart wasn't messed with.
        self.assertFalse(autostart_disabled.called)
+
    @patch('subprocess.Popen')
    def test_ensure_package_status(self, popen):
        """Package selections are piped to 'dpkg --set-selections'."""
        for status in ['install', 'hold']:
            with self.subTest(status=status):
                popen.reset_mock()
                hookenv.config()['package_status'] = status
                helpers.ensure_package_status(['a_pack', 'b_pack'])

                selections = 'a_pack {}\nb_pack {}\n'.format(
                    status, status).encode('US-ASCII')

                self.assertEqual(
                    [call(['dpkg', '--set-selections'], stdin=subprocess.PIPE),
                     call().communicate(input=selections)], popen.mock_calls)

        # An unknown package_status is rejected before dpkg is invoked.
        popen.reset_mock()
        hookenv.config()['package_status'] = 'invalid'
        self.assertRaises(RuntimeError,
                          helpers.ensure_package_status, ['a_pack', 'b_back'])
        self.assertFalse(popen.called)
+
+ @patch('charmhelpers.core.hookenv.leader_get')
+ def test_get_seed_ips(self, leader_get):
+ leader_get.return_value = '1.2.3.4,5.6.7.8'
+ self.assertSetEqual(helpers.get_seed_ips(), set(['1.2.3.4',
+ '5.6.7.8']))
+
+ @patch('helpers.read_cassandra_yaml')
+ def test_actual_seed_ips(self, read_yaml):
+ read_yaml.return_value = yaml.load(dedent('''\
+ seed_provider:
+ - class_name: blah
+ parameters:
+ - seeds: a,b,c
+ '''))
+ self.assertSetEqual(helpers.actual_seed_ips(),
+ set(['a', 'b', 'c']))
+
    @patch('relations.StorageRelation')
    def test_get_database_directory(self, storage_relation):
        """Database paths resolve against the active storage mountpoint."""
        storage_relation().mountpoint = None

        # Relative paths are relative to /var/lib/cassandra
        self.assertEqual(helpers.get_database_directory('bar'),
                         '/var/lib/cassandra/bar')

        # If there is an external mount, relative paths are relative to
        # it. Note the extra 'cassandra' directory - life is easier
        # if we store all our data in a subdirectory on the external
        # mount rather than in its root.
        storage_relation().mountpoint = '/srv/foo'
        self.assertEqual(helpers.get_database_directory('bar'),
                         '/srv/foo/cassandra/bar')

        # Absolute paths are absolute and passed through unmolested.
        self.assertEqual(helpers.get_database_directory('/bar'), '/bar')
+
    @patch('helpers.get_cassandra_version')
    @patch('relations.StorageRelation')
    def test_get_all_database_directories(self, storage_relation, ver):
        """Cassandra 2.x gets data, commitlog and saved_caches dirs."""
        ver.return_value = '2.2'
        storage_relation().mountpoint = '/s'
        self.assertDictEqual(
            helpers.get_all_database_directories(),
            dict(data_file_directories=['/s/cassandra/data'],
                 commitlog_directory='/s/cassandra/commitlog',
                 saved_caches_directory='/s/cassandra/saved_caches'))
+
    @patch('helpers.get_cassandra_version')
    @patch('relations.StorageRelation')
    def test_get_all_database_directories_30(self, storage_relation, ver):
        """Cassandra 3.0 additionally gets a hints directory."""
        ver.return_value = '3.0'
        storage_relation().mountpoint = '/s'
        self.assertDictEqual(
            helpers.get_all_database_directories(),
            dict(data_file_directories=['/s/cassandra/data'],
                 commitlog_directory='/s/cassandra/commitlog',
                 saved_caches_directory='/s/cassandra/saved_caches',
                 hints_directory='/s/cassandra/hints'))
+
    @patch('helpers.recursive_chown')
    @patch('charmhelpers.core.host.mkdir')
    @patch('helpers.get_database_directory')
    @patch('helpers.is_cassandra_running')
    def test_ensure_database_directory(self, is_running, get_db_dir, mkdir,
                                       recursive_chown):
        """Database directories are created with cassandra ownership."""
        absdir = '/an/absolute/dir'
        is_running.return_value = False
        get_db_dir.return_value = absdir

        # ensure_database_directory() returns the absolute path.
        self.assertEqual(helpers.ensure_database_directory(absdir), absdir)

        # The directory will have been made, parents first with default
        # ownership and the leaf locked down for the cassandra user.
        mkdir.assert_has_calls([
            call('/an'),
            call('/an/absolute'),
            call('/an/absolute/dir',
                 owner='cassandra', group='cassandra', perms=0o750)])

        # The ownership of the contents has not been reset. Rather than
        # attempting to remount an existing database, which requires
        # resetting permissions, it is better to use sstableloader to
        # import the data into the cluster.
        self.assertFalse(recursive_chown.called)
+
    @patch('charmhelpers.core.host.write_file')
    @patch('os.path.isdir')
    @patch('subprocess.check_output')
    def test_set_io_scheduler(self, check_output, isdir, write_file):
        """The scheduler is written to the device's sysfs queue file."""
        # Normal operation, the device is detected and the magic
        # file written.
        check_output.return_value = 'foo\n/dev/sdq 1 2 3 1% /foo\n'
        isdir.return_value = True

        helpers.set_io_scheduler('fnord', '/foo')

        write_file.assert_called_once_with('/sys/block/sdq/queue/scheduler',
                                           b'fnord', perms=0o644)

        # Some OSErrors we log warnings for, and continue.
        for e in (errno.EACCES, errno.ENOENT):
            with self.subTest(errno=e):
                write_file.side_effect = repeat(OSError(e, 'Whoops'))
                hookenv.log.reset_mock()
                helpers.set_io_scheduler('fnord', '/foo')
                hookenv.log.assert_has_calls([call(ANY),
                                              call(ANY, hookenv.WARNING)])

        # Other OSErrors just fail hard.
        write_file.side_effect = iter([OSError(errno.EFAULT, 'Whoops')])
        self.assertRaises(OSError, helpers.set_io_scheduler, 'fnord', '/foo')

        # If we are not under lxc, nothing happens at all except a log
        # message.
        helpers.is_lxc.return_value = True
        hookenv.log.reset_mock()
        write_file.reset_mock()
        helpers.set_io_scheduler('fnord', '/foo')
        self.assertFalse(write_file.called)
        hookenv.log.assert_called_once_with(ANY)  # A single INFO message.
+
    @patch('shutil.chown')
    def test_recursive_chown(self, chown):
        """recursive_chown() chowns every file and directory in the tree."""
        with tempfile.TemporaryDirectory() as tmpdir:
            os.makedirs(os.path.join(tmpdir, 'a', 'bb', 'ccc'))
            with open(os.path.join(tmpdir, 'top file'), 'w') as f:
                f.write('top file')
            with open(os.path.join(tmpdir, 'a', 'bb', 'midfile'), 'w') as f:
                f.write('midfile')
            helpers.recursive_chown(tmpdir, 'un', 'gn')
            # Every entry below the root (but not the root itself) is
            # chowned to the requested user and group.
            chown.assert_has_calls(
                [call(os.path.join(tmpdir, 'a'), 'un', 'gn'),
                 call(os.path.join(tmpdir, 'a', 'bb'), 'un', 'gn'),
                 call(os.path.join(tmpdir, 'a', 'bb', 'ccc'), 'un', 'gn'),
                 call(os.path.join(tmpdir, 'top file'), 'un', 'gn'),
                 call(os.path.join(tmpdir, 'a', 'bb', 'midfile'), 'un', 'gn')],
                any_order=True)
+
    def test_maybe_backup(self):
        """maybe_backup() makes a one-time .orig copy with safe perms."""
        with tempfile.TemporaryDirectory() as tmpdir:
            # Our file is backed up to a .orig
            path = os.path.join(tmpdir, 'foo.conf')
            host.write_file(path, b'hello', perms=0o644)
            helpers.maybe_backup(path)
            path_orig = path + '.orig'
            self.assertTrue(os.path.exists(path_orig))
            with open(path_orig, 'rb') as f:
                self.assertEqual(f.read(), b'hello')
            # Safe permissions
            self.assertEqual(os.lstat(path_orig).st_mode & 0o777, 0o600)

            # A second call, nothing happens as the .orig is already
            # there.
            host.write_file(path, b'second')
            helpers.maybe_backup(path)
            with open(path_orig, 'rb') as f:
                self.assertEqual(f.read(), b'hello')
+
    @patch('charmhelpers.fetch.apt_cache')
    def test_get_package_version(self, apt_cache):
        """The installed version string is read from the apt cache."""
        version = namedtuple('Version', 'ver_str')('1.0-foo')
        package = namedtuple('Package', 'current_ver')(version)
        apt_cache.return_value = dict(package=package)
        ver = helpers.get_package_version('package')
        self.assertEqual(ver, '1.0-foo')
+
    @patch('charmhelpers.fetch.apt_cache')
    def test_get_package_version_not_found(self, apt_cache):
        """None is returned for a package missing from the apt cache."""
        version = namedtuple('Version', 'ver_str')('1.0-foo')
        package = namedtuple('Package', 'current_ver')(version)
        apt_cache.return_value = dict(package=package)
        self.assertIsNone(helpers.get_package_version('notfound'))
+
    @patch('charmhelpers.fetch.apt_cache')
    def test_get_package_version_not_installed(self, apt_cache):
        """None is returned for a known but uninstalled package."""
        package = namedtuple('Package', 'current_ver')(None)
        apt_cache.return_value = dict(package=package)
        self.assertIsNone(helpers.get_package_version('package'))
+
+ def test_get_jre(self):
+ hookenv.config()['jre'] = 'opEnjdk' # Case insensitive
+ self.assertEqual(helpers.get_jre(), 'openjdk')
+
+ hookenv.config()['jre'] = 'oRacle' # Case insensitive
+ self.assertEqual(helpers.get_jre(), 'oracle')
+
    def test_get_jre_unknown(self):
        """An unrecognised JRE falls back to openjdk and logs an error."""
        hookenv.config()['jre'] = 'OopsJDK'
        self.assertEqual(helpers.get_jre(), 'openjdk')
        # An error was logged.
        hookenv.log.assert_called_once_with(ANY, hookenv.ERROR)
+
+ def test_get_jre_dse_override(self):
+ hookenv.config()['edition'] = 'dse'
+ self.assertEqual(helpers.get_jre(), 'oracle')
+
    def test_get_cassandra_edition(self):
        """The edition is normalised; bad values default and log an error."""
        hookenv.config()['edition'] = 'community'
        self.assertEqual(helpers.get_cassandra_edition(), 'community')

        hookenv.config()['edition'] = 'DSE'  # Case insensitive
        self.assertEqual(helpers.get_cassandra_edition(), 'dse')

        self.assertFalse(hookenv.log.called)

        hookenv.config()['edition'] = 'typo'  # Default to community
        self.assertEqual(helpers.get_cassandra_edition(), 'community')
        hookenv.log.assert_any_call(ANY, hookenv.ERROR)  # Logs an error.
+
    @patch('helpers.get_cassandra_edition')
    def test_get_cassandra_service(self, get_edition):
        """The init service is 'cassandra', or 'dse' for the DSE edition."""
        get_edition.return_value = 'whatever'
        self.assertEqual(helpers.get_cassandra_service(), 'cassandra')
        get_edition.return_value = 'dse'
        self.assertEqual(helpers.get_cassandra_service(), 'dse')
+
+ def test_get_cassandra_service_dse_override(self):
+ hookenv.config()['edition'] = 'dse'
+ self.assertEqual(helpers.get_cassandra_service(), 'dse')
+
    @patch('helpers.get_package_version')
    def test_get_cassandra_version(self, get_package_version):
        """The version comes from the installed cassandra package."""
        # Return cassandra package version if it is installed.
        get_package_version.return_value = '1.2.3-2~64'
        self.assertEqual(helpers.get_cassandra_version(), '1.2.3-2~64')
        get_package_version.assert_called_with('cassandra')
+
    @patch('helpers.get_package_version')
    def test_get_cassandra_version_uninstalled(self, get_package_version):
        """None is returned when cassandra is not installed."""
        # Return none if the main cassandra package is not installed
        get_package_version.return_value = None
        self.assertEqual(helpers.get_cassandra_version(), None)
        get_package_version.assert_called_with('cassandra')
+
    @patch('helpers.get_package_version')
    def test_get_cassandra_version_dse(self, get_package_version):
        """For DSE, the dse-full version is mapped to a Cassandra version."""
        # Return the cassandra version equivalent if using dse.
        hookenv.config()['edition'] = 'dse'
        get_package_version.return_value = '4.7-beta2~88'
        self.assertEqual(helpers.get_cassandra_version(), '2.1')
        get_package_version.assert_called_with('dse-full')
+
    @patch('helpers.get_package_version')
    def test_get_cassandra_version_dse_uninstalled(self, get_package_version):
        """None is returned when dse-full is not installed."""
        hookenv.config()['edition'] = 'dse'
        get_package_version.return_value = None
        self.assertEqual(helpers.get_cassandra_version(), None)
        get_package_version.assert_called_with('dse-full')
+
    def test_get_cassandra_config_dir(self):
        """The config directory depends on the configured edition."""
        self.assertEqual(helpers.get_cassandra_config_dir(),
                         '/etc/cassandra')
        hookenv.config()['edition'] = 'dse'
        self.assertEqual(helpers.get_cassandra_config_dir(),
                         '/etc/dse/cassandra')
+
+ @patch('helpers.get_cassandra_config_dir')
+ def test_get_cassandra_yaml_file(self, get_cassandra_config_dir):
+ get_cassandra_config_dir.return_value = '/foo'
+ self.assertEqual(helpers.get_cassandra_yaml_file(),
+ '/foo/cassandra.yaml')
+
+ @patch('helpers.get_cassandra_config_dir')
+ def test_get_cassandra_env_file(self, get_cassandra_config_dir):
+ get_cassandra_config_dir.return_value = '/foo'
+ self.assertEqual(helpers.get_cassandra_env_file(),
+ '/foo/cassandra-env.sh')
+
+ @patch('helpers.get_cassandra_config_dir')
+ def test_get_cassandra_rackdc_file(self, get_cassandra_config_dir):
+ get_cassandra_config_dir.return_value = '/foo'
+ self.assertEqual(helpers.get_cassandra_rackdc_file(),
+ '/foo/cassandra-rackdc.properties')
+
+ @patch('helpers.get_cassandra_edition')
+ def test_get_cassandra_pid_file(self, get_edition):
+ get_edition.return_value = 'whatever'
+ self.assertEqual(helpers.get_cassandra_pid_file(),
+ '/var/run/cassandra/cassandra.pid')
+ get_edition.return_value = 'dse'
+ self.assertEqual(helpers.get_cassandra_pid_file(),
+ '/var/run/dse/dse.pid')
+
+ def test_get_cassandra_packages(self):
+ # Default
+ self.assertSetEqual(helpers.get_cassandra_packages(),
+ set(['cassandra', 'ntp', 'run-one',
+ 'netcat', 'openjdk-8-jre-headless']))
+
+ def test_get_cassandra_packages_oracle_jre(self):
+ # Oracle JRE
+ hookenv.config()['jre'] = 'oracle'
+ self.assertSetEqual(helpers.get_cassandra_packages(),
+ set(['cassandra', 'ntp', 'run-one', 'netcat']))
+
+ def test_get_cassandra_packages_dse(self):
+ # DataStax Enterprise, and implicit Oracle JRE.
+ hookenv.config()['edition'] = 'dsE' # Insensitive.
+ self.assertSetEqual(helpers.get_cassandra_packages(),
+ set(['dse-full', 'ntp', 'run-one', 'netcat']))
+
    @patch('helpers.get_cassandra_service')
    @patch('charmhelpers.core.host.service_stop')
    @patch('helpers.is_cassandra_running')
    def test_stop_cassandra(self, is_cassandra_running,
                            service_stop, get_service):
        # Running -> stopped: the service is stopped exactly once.
        get_service.return_value = sentinel.service_name
        is_cassandra_running.side_effect = iter([True, False])
        helpers.stop_cassandra()
        service_stop.assert_called_once_with(sentinel.service_name)

    @patch('helpers.get_cassandra_service')
    @patch('charmhelpers.core.host.service_stop')
    @patch('helpers.is_cassandra_running')
    def test_stop_cassandra_noop(self, is_cassandra_running,
                                 service_stop, get_service):
        # Already stopped: stop_cassandra() does nothing.
        get_service.return_value = sentinel.service_name
        is_cassandra_running.return_value = False
        helpers.stop_cassandra()
        self.assertFalse(service_stop.called)

    @patch('charmhelpers.core.hookenv.status_set')
    @patch('helpers.get_cassandra_service')
    @patch('charmhelpers.core.host.service_stop')
    @patch('helpers.is_cassandra_running')
    def test_stop_cassandra_failure(self, is_cassandra_running,
                                    service_stop, get_service, status_set):
        # Still running after service_stop: the hook blocks and exits.
        get_service.return_value = sentinel.service_name
        is_cassandra_running.side_effect = iter([True, True])
        self.assertRaises(SystemExit, helpers.stop_cassandra)
        service_stop.assert_called_once_with(sentinel.service_name)
        status_set.assert_called_once_with('blocked',
                                           'Cassandra failed to shut down')

    @patch('helpers.actual_seed_ips')
    @patch('time.sleep')
    @patch('helpers.get_cassandra_service')
    @patch('charmhelpers.core.host.service_start')
    @patch('helpers.is_cassandra_running')
    def test_start_cassandra(self, is_cassandra_running,
                             service_start, get_service, sleep, seed_ips):
        get_service.return_value = sentinel.service_name
        seed_ips.return_value = set(['1.2.3.4'])
        is_cassandra_running.return_value = True
        # Already running: start is not attempted.
        helpers.start_cassandra()
        self.assertFalse(service_start.called)

        # Not running: started once, then polled until it is up.
        is_cassandra_running.side_effect = iter([False, False, False, True])
        helpers.start_cassandra()
        service_start.assert_called_once_with(sentinel.service_name)

        # A side effect of starting cassandra is storing the current live
        # seed list, so we can tell when it has changed.
        self.assertEqual(hookenv.config()['configured_seeds'], ['1.2.3.4'])

    @patch('os.chmod')
    @patch('helpers.is_cassandra_running')
    @patch('relations.StorageRelation')
    def test_remount_cassandra(self, storage, is_running, chmod):
        # Data is migrated to freshly mounted external storage.
        config = hookenv.config()
        storage().needs_remount.return_value = True
        storage().mountpoint = '/srv/foo'
        is_running.return_value = False
        config['data_file_directories'] = '/srv/ext/data1 data2'
        config['bootstrapped_into_cluster'] = True

        helpers.remount_cassandra()
        storage().migrate.assert_called_once_with('/var/lib/cassandra',
                                                  'cassandra')
        chmod.assert_called_once_with('/srv/foo/cassandra', 0o750)
        # Remounting resets the bootstrap flag.
        self.assertEqual(config['bootstrapped_into_cluster'], False)

    @patch('os.chmod')
    @patch('helpers.is_cassandra_running')
    @patch('relations.StorageRelation')
    def test_remount_cassandra_noop(self, storage, is_running, chmod):
        # No remount needed: no migration and no permission changes.
        storage().needs_remount.return_value = False
        storage().mountpoint = None
        is_running.return_value = False

        helpers.remount_cassandra()
        self.assertFalse(storage().migrate.called)
        self.assertFalse(chmod.called)

    @patch('helpers.is_cassandra_running')
    @patch('relations.StorageRelation')
    def test_remount_cassandra_unmount(self, storage, is_running):
        storage().needs_remount.return_value = True
        storage().mountpoint = None  # Reverting to local disk.
        is_running.return_value = False
        hookenv.config()['data_file_directories'] = '/srv/ext/data1 data2'

        helpers.remount_cassandra()

        # We cannot migrate data back to local disk, as by the time our
        # hooks are called the data is gone.
        self.assertFalse(storage().migrate.called)

        # We warn in this case, as reverting to local disk may resurrect
        # old data (if the cluster was ever running while using local
        # disk).
        hookenv.log.assert_any_call(ANY, hookenv.WARNING)

    @patch('helpers.ensure_database_directory')
    @patch('helpers.get_all_database_directories')
    def test_ensure_database_directories(self, get_all_dirs, ensure_dir):
        # Every configured database directory is ensured, in any order.
        get_all_dirs.return_value = dict(
            data_file_directories=[sentinel.data_file_dir_1,
                                   sentinel.data_file_dir_2],
            commitlog_directory=sentinel.commitlog_dir,
            saved_caches_directory=sentinel.saved_caches_dir)
        helpers.ensure_database_directories()
        ensure_dir.assert_has_calls([
            call(sentinel.data_file_dir_1),
            call(sentinel.data_file_dir_2),
            call(sentinel.commitlog_dir),
            call(sentinel.saved_caches_dir)], any_order=True)

    @patch('cassandra.cluster.Cluster')
    @patch('cassandra.auth.PlainTextAuthProvider')
    @patch('helpers.superuser_credentials')
    @patch('helpers.read_cassandra_yaml')
    def test_connect(self, yaml, creds, auth_provider, cluster):
        # host and port are pulled from the current active
        # cassandra.yaml file, rather than configuration, as
        # configuration may not match reality (if for no other reason
        # that we are running this code in order to make reality match
        # the desired configuration).
        yaml.return_value = dict(rpc_address='1.2.3.4',
                                 native_transport_port=666)

        creds.return_value = ('un', 'pw')
        auth_provider.return_value = sentinel.ap

        cluster().connect.return_value = sentinel.session
        cluster.reset_mock()

        with helpers.connect() as session:
            auth_provider.assert_called_once_with(username='un',
                                                  password='pw')
            cluster.assert_called_once_with(['1.2.3.4'], port=666,
                                            auth_provider=sentinel.ap)
            self.assertIs(session, sentinel.session)
            # The cluster is only shut down once the context exits.
            self.assertFalse(cluster().shutdown.called)

        cluster().shutdown.assert_called_once_with()

    @patch('cassandra.cluster.Cluster')
    @patch('cassandra.auth.PlainTextAuthProvider')
    @patch('helpers.superuser_credentials')
    @patch('helpers.read_cassandra_yaml')
    def test_connect_with_creds(self, yaml, creds, auth_provider, cluster):
        # host and port are pulled from the current active
        # cassandra.yaml file, rather than configuration, as
        # configuration may not match reality (if for no other reason
        # that we are running this code in order to make reality match
        # the desired configuration).
        yaml.return_value = dict(rpc_address='1.2.3.4',
                                 native_transport_port=666)

        auth_provider.return_value = sentinel.ap

        # Explicit credentials take precedence over the stored superuser
        # credentials.
        with helpers.connect(username='explicit', password='boo'):
            auth_provider.assert_called_once_with(username='explicit',
                                                  password='boo')

    @patch('time.sleep')
    @patch('time.time')
    @patch('cassandra.cluster.Cluster')
    @patch('helpers.superuser_credentials')
    @patch('helpers.read_cassandra_yaml')
    def test_connect_badauth(self, yaml, creds, cluster, time, sleep):
        # host and port are pulled from the current active
        # cassandra.yaml file, rather than configuration, as
        # configuration may not match reality (if for no other reason
        # that we are running this code in order to make reality match
        # the desired configuration).
        yaml.return_value = dict(rpc_address='1.2.3.4',
                                 native_transport_port=666)
        time.side_effect = [0, 7, 99999]

        creds.return_value = ('un', 'pw')

        x = NoHostAvailable('whoops', {'1.2.3.4': AuthenticationFailed()})
        cluster().connect.side_effect = x

        self.assertRaises(AuthenticationFailed, helpers.connect().__enter__)

        # Authentication failures are retried, but for a shorter time
        # than other connection errors which are retried for a few
        # minutes.
        self.assertEqual(cluster().connect.call_count, 2)
        self.assertEqual(cluster().shutdown.call_count, 2)

    @patch('time.sleep')
    @patch('time.time')
    @patch('cassandra.cluster.Cluster')
    @patch('helpers.superuser_credentials')
    @patch('helpers.read_cassandra_yaml')
    def test_connect_timeout(self, yaml, creds, cluster, time, sleep):
        yaml.return_value = dict(rpc_address='1.2.3.4',
                                 native_transport_port=666)
        time.side_effect = [0, 1, 2, 3, 10, 20, 30, 40, 99999]

        creds.return_value = ('un', 'pw')

        x = NoHostAvailable('whoops', {'1.2.3.4': sentinel.exception})
        cluster().connect.side_effect = x

        self.assertRaises(NoHostAvailable, helpers.connect().__enter__)

        # Ordinary connection errors are retried until the deadline
        # passes (longer than authentication failures, which give up
        # after only a couple of attempts).
        self.assertEqual(cluster().connect.call_count, 5)
        self.assertEqual(cluster().shutdown.call_count, 5)
        self.assertEqual(sleep.call_count, 4)

    @patch('cassandra.query.SimpleStatement')
    def test_query(self, simple_statement):
        # The statement is wrapped with the requested consistency level.
        simple_statement.return_value = sentinel.s_statement
        session = MagicMock()
        session.execute.return_value = sentinel.results
        self.assertEqual(helpers.query(session, sentinel.statement,
                                       sentinel.consistency, sentinel.args),
                         sentinel.results)
        simple_statement.assert_called_once_with(
            sentinel.statement, consistency_level=sentinel.consistency)
        session.execute.assert_called_once_with(simple_statement(''),
                                                sentinel.args)

    @patch('cassandra.query.SimpleStatement')
    @patch('helpers.backoff')
    def test_query_retry(self, backoff, simple_statement):
        # A failed query is retried until it succeeds.
        backoff.return_value = repeat(True)
        simple_statement.return_value = sentinel.s_statement
        session = MagicMock()
        session.execute.side_effect = iter([RuntimeError(), sentinel.results])
        self.assertEqual(helpers.query(session, sentinel.statement,
                                       sentinel.consistency, sentinel.args),
                         sentinel.results)
        self.assertEqual(session.execute.call_count, 2)

    @patch('time.time')
    @patch('cassandra.query.SimpleStatement')
    @patch('helpers.backoff')
    def test_query_timeout(self, backoff, simple_statement, time):
        backoff.return_value = repeat(True)
        # Timeout is 600
        time.side_effect = iter([0, 1, 2, 3, 500, 700, RuntimeError()])
        simple_statement.return_value = sentinel.s_statement
        session = MagicMock()

        class Whoops(Exception):
            pass

        session.execute.side_effect = repeat(Whoops('Fail'))
        # Once the deadline passes, the last failure is reraised.
        self.assertRaises(Whoops, helpers.query, session, sentinel.statement,
                          sentinel.consistency, sentinel.args)
        self.assertEqual(session.execute.call_count, 4)

    @patch('helpers.get_cassandra_version')
    @patch('helpers.query')
    def test_ensure_user(self, query, ver):
        # Cassandra 2.1 and earlier store accounts in the
        # system_auth.users and system_auth.credentials tables.
        ver.return_value = '2.1'
        helpers.ensure_user(sentinel.session,
                            sentinel.username, sentinel.pwhash,
                            superuser=sentinel.supflag)
        query.assert_has_calls([
            call(sentinel.session,
                 'INSERT INTO system_auth.users (name, super) VALUES (%s, %s)',
                 ConsistencyLevel.ALL, (sentinel.username, sentinel.supflag)),
            call(sentinel.session,
                 'INSERT INTO system_auth.credentials (username, salted_hash) '
                 'VALUES (%s, %s)',
                 ConsistencyLevel.ALL,
                 (sentinel.username, sentinel.pwhash))])

    @patch('helpers.get_cassandra_version')
    @patch('helpers.query')
    def test_ensure_user_22(self, query, ver):
        # Cassandra 2.2 uses the single system_auth.roles table instead.
        ver.return_value = '2.2'
        helpers.ensure_user(sentinel.session,
                            sentinel.username, sentinel.pwhash,
                            superuser=sentinel.supflag)
        query.assert_called_once_with(sentinel.session,
                                      'INSERT INTO system_auth.roles (role, '
                                      'can_login, is_superuser, salted_hash) '
                                      'VALUES (%s, TRUE, %s, %s)',
                                      ConsistencyLevel.ALL,
                                      (sentinel.username, sentinel.supflag,
                                       sentinel.pwhash))

    @patch('helpers.ensure_user')
    @patch('helpers.encrypt_password')
    @patch('helpers.nodetool')
    @patch('helpers.reconfigure_and_restart_cassandra')
    @patch('helpers.connect')
    @patch('helpers.superuser_credentials')
    def test_create_unit_superuser_hard(self, creds, connect, restart,
                                        nodetool, encrypt_password,
                                        ensure_user):
        creds.return_value = (sentinel.username, sentinel.password)
        connect().__enter__.return_value = sentinel.session
        connect().__exit__.return_value = False
        connect.reset_mock()

        encrypt_password.return_value = sentinel.pwhash

        helpers.create_unit_superuser_hard()

        # Cassandra was restarted twice, first with authentication
        # disabled and again with the normal configuration.
        restart.assert_has_calls([
            call(dict(authenticator='AllowAllAuthenticator',
                      rpc_address='localhost')),
            call()])

        # A connection was made as the superuser, which works because
        # authentication has been disabled on this node.
        connect.assert_called_once_with()

        # The user was created.
        encrypt_password.assert_called_once_with(sentinel.password)
        ensure_user.assert_called_once_with(sentinel.session,
                                            sentinel.username,
                                            sentinel.pwhash,
                                            superuser=True)

        # Local Cassandra was flushed. This is probably unnecessary.
        nodetool.assert_called_once_with('flush')

+ def test_cqlshrc_path(self):
+ self.assertEqual(helpers.get_cqlshrc_path(),
+ '/root/.cassandra/cqlshrc')
+
+ def test_superuser_username(self):
+ self.assertEqual(hookenv.local_unit(), 'service/1')
+ self.assertEqual(helpers.superuser_username(), 'juju_service_1')
+
    @patch('helpers.superuser_username')
    @patch('helpers.get_cqlshrc_path')
    @patch('helpers.get_cassandra_version')
    @patch('charmhelpers.core.host.pwgen')
    def test_superuser_credentials_20(self, pwgen, get_cassandra_version,
                                      get_cqlshrc_path, get_username):
        # Cassandra 2.0 cqlshrc records the thrift rpc_port.
        get_cassandra_version.return_value = '2.0'
        with tempfile.TemporaryDirectory() as dotcassandra_dir:
            cqlshrc_path = os.path.join(dotcassandra_dir, 'cqlshrc')
            get_cqlshrc_path.return_value = cqlshrc_path
            get_username.return_value = 'foo'
            pwgen.return_value = 'secret'
            hookenv.config()['rpc_port'] = 666
            hookenv.config()['native_transport_port'] = 777

            # First time generates username & password.
            username, password = helpers.superuser_credentials()
            self.assertEqual(username, 'foo')
            self.assertEqual(password, 'secret')

            # Credentials are stored in the cqlshrc file.
            expected_cqlshrc = dedent('''\
                [authentication]
                username = foo
                password = secret

                [connection]
                hostname = 10.30.0.1
                port = 666
                ''').strip()
            with open(cqlshrc_path, 'r') as f:
                self.assertEqual(f.read().strip(), expected_cqlshrc)

            # If the credentials have been stored, they are not
            # regenerated.
            pwgen.return_value = 'secret2'
            username, password = helpers.superuser_credentials()
            self.assertEqual(username, 'foo')
            self.assertEqual(password, 'secret')
            with open(cqlshrc_path, 'r') as f:
                self.assertEqual(f.read().strip(), expected_cqlshrc)

    @patch('helpers.superuser_username')
    @patch('helpers.get_cqlshrc_path')
    @patch('helpers.get_cassandra_version')
    @patch('charmhelpers.core.host.pwgen')
    def test_superuser_credentials(self, pwgen, get_cassandra_version,
                                   get_cqlshrc_path, get_username):
        # Cassandra 2.1 or higher uses native protocol in its cqlshrc,
        # so the native_transport_port is recorded instead.
        get_cassandra_version.return_value = '2.1'
        with tempfile.TemporaryDirectory() as dotcassandra_dir:
            cqlshrc_path = os.path.join(dotcassandra_dir, 'cqlshrc')
            get_cqlshrc_path.return_value = cqlshrc_path
            get_username.return_value = 'foo'
            pwgen.return_value = 'secret'
            hookenv.config()['rpc_port'] = 666
            hookenv.config()['native_transport_port'] = 777

            # First time generates username & password.
            username, password = helpers.superuser_credentials()
            self.assertEqual(username, 'foo')
            self.assertEqual(password, 'secret')

            # Credentials are stored in the cqlshrc file.
            expected_cqlshrc = dedent('''\
                [authentication]
                username = foo
                password = secret

                [connection]
                hostname = 10.30.0.1
                port = 777
                ''').strip()
            with open(cqlshrc_path, 'r') as f:
                self.assertEqual(f.read().strip(), expected_cqlshrc)

    @patch('subprocess.check_output')
    def test_nodetool(self, check_output):
        check_output.return_value = 'OK'
        self.assertEqual(helpers.nodetool('status', 'system_auth'), 'OK')

        # The expected command was run against the local node.
        check_output.assert_called_once_with(
            ['nodetool', 'status', 'system_auth'],
            universal_newlines=True, stderr=subprocess.STDOUT, timeout=119)

        # The output was emitted.
        helpers.emit.assert_called_once_with('OK')

    @patch('helpers.is_cassandra_running')
    @patch('helpers.backoff')
    @patch('subprocess.check_output')
    def test_nodetool_CASSANDRA_8776(self, check_output, backoff, is_running):
        # Output containing 'Error' is treated as a failure and the call
        # retried while Cassandra is running (named for CASSANDRA-8776).
        is_running.return_value = True
        backoff.return_value = repeat(True)
        check_output.side_effect = iter(['ONE Error: stuff', 'TWO OK'])
        self.assertEqual(helpers.nodetool('status'), 'TWO OK')

        # The output was emitted.
        helpers.emit.assert_called_once_with('TWO OK')

    @patch('helpers.is_cassandra_running')
    @patch('helpers.backoff')
    @patch('subprocess.check_output')
    def test_nodetool_retry(self, check_output, backoff, is_running):
        # Failed nodetool invocations are retried until one succeeds.
        backoff.return_value = repeat(True)
        is_running.return_value = True
        check_output.side_effect = iter([
            subprocess.CalledProcessError([], 1, 'fail 1'),
            subprocess.CalledProcessError([], 1, 'fail 2'),
            subprocess.CalledProcessError([], 1, 'fail 3'),
            subprocess.CalledProcessError([], 1, 'fail 4'),
            subprocess.CalledProcessError([], 1, 'fail 5'),
            'OK'])
        self.assertEqual(helpers.nodetool('status'), 'OK')

        # Later fails and final output was emitted.
        helpers.emit.assert_has_calls([call('fail 5'), call('OK')])

    @patch('helpers.get_bootstrapped_ips')
    def test_num_nodes(self, bootstrapped_ips):
        # The cluster size is the number of bootstrapped node IPs.
        bootstrapped_ips.return_value = ['10.0.0.1', '10.0.0.2']
        self.assertEqual(helpers.num_nodes(), 2)

    @patch('helpers.get_cassandra_yaml_file')
    def test_read_cassandra_yaml(self, get_cassandra_yaml_file):
        # The active cassandra.yaml is parsed into a dict.
        with tempfile.NamedTemporaryFile('w') as f:
            f.write('a: one')
            f.flush()
            get_cassandra_yaml_file.return_value = f.name
            self.assertDictEqual(helpers.read_cassandra_yaml(),
                                 dict(a='one'))

    @patch('helpers.get_cassandra_yaml_file')
    def test_write_cassandra_yaml(self, get_cassandra_yaml_file):
        # Data is serialized as YAML into the cassandra.yaml file.
        with tempfile.NamedTemporaryFile() as f:
            get_cassandra_yaml_file.return_value = f.name
            helpers.write_cassandra_yaml([1, 2, 3])
            with open(f.name, 'r') as f2:
                self.assertEqual(f2.read(), '[1, 2, 3]\n')

    @patch('helpers.get_cassandra_version')
    @patch('helpers.get_cassandra_yaml_file')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.host.write_file')
    def test_configure_cassandra_yaml_20(self, write_file, seed_ips, yaml_file,
                                         get_cassandra_version):
        get_cassandra_version.return_value = '2.0'
        hookenv.config().update(dict(num_tokens=128,
                                     cluster_name='test_cluster_name',
                                     partitioner='test_partitioner'))

        seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']

        existing_config = '''
            seed_provider:
                - class_name: blah.SimpleSeedProvider
                  parameters:
                      - seeds: 127.0.0.1 # Comma separated list.
            '''

        with tempfile.TemporaryDirectory() as tmpdir:
            yaml_config = os.path.join(tmpdir, 'c.yaml')
            yaml_file.return_value = yaml_config
            with open(yaml_config, 'w', encoding='UTF-8') as f:
                f.write(existing_config)

            helpers.configure_cassandra_yaml()

            # Two write_file calls are made; the last one carries the
            # new cassandra.yaml content.
            self.assertEqual(write_file.call_count, 2)
            new_config = write_file.call_args[0][1]

            expected_config = dedent('''\
                cluster_name: test_cluster_name
                authenticator: PasswordAuthenticator
                num_tokens: 128
                partitioner: test_partitioner
                listen_address: 10.20.0.1
                rpc_address: 0.0.0.0
                rpc_port: 9160
                native_transport_port: 9042
                storage_port: 7000
                ssl_storage_port: 7001
                authorizer: AllowAllAuthorizer
                seed_provider:
                    - class_name: blah.SimpleSeedProvider
                      parameters:
                          # No whitespace in seeds is important.
                          - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
                endpoint_snitch: GossipingPropertyFileSnitch
                data_file_directories:
                    - /var/lib/cassandra/data
                commitlog_directory: /var/lib/cassandra/commitlog
                saved_caches_directory: /var/lib/cassandra/saved_caches
                compaction_throughput_mb_per_sec: 16
                stream_throughput_outbound_megabits_per_sec: 200
                tombstone_warn_threshold: 1000
                tombstone_failure_threshold: 100000
                start_rpc: true
                ''')
            self.maxDiff = None
            # Compare parsed YAML so key order is irrelevant.
            self.assertEqual(yaml.safe_load(new_config),
                             yaml.safe_load(expected_config))

            # Confirm we can use an explicit cluster_name too.
            write_file.reset_mock()
            hookenv.config()['cluster_name'] = 'fubar'
            helpers.configure_cassandra_yaml()
            new_config = write_file.call_args[0][1]
            self.assertEqual(yaml.safe_load(new_config)['cluster_name'],
                             'fubar')

    @patch('helpers.get_cassandra_version')
    @patch('helpers.get_cassandra_yaml_file')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.host.write_file')
    def test_configure_cassandra_yaml_22(self, write_file, seed_ips, yaml_file,
                                         get_cassandra_version):
        # NOTE(review): despite the _22 name this pins version '2.0';
        # the 2.2 behaviour under test appears to be the start_rpc
        # handling driven by the existing yaml — confirm intended.
        get_cassandra_version.return_value = '2.0'
        hookenv.config().update(dict(num_tokens=128,
                                     cluster_name='test_cluster_name',
                                     partitioner='test_partitioner'))

        seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']

        existing_config = '''
            seed_provider:
                - class_name: blah.SimpleSeedProvider
                  parameters:
                      - seeds: 127.0.0.1 # Comma separated list.
            start_rpc: false # Defaults to False starting 2.2
            '''

        with tempfile.TemporaryDirectory() as tmpdir:
            yaml_config = os.path.join(tmpdir, 'c.yaml')
            yaml_file.return_value = yaml_config
            with open(yaml_config, 'w', encoding='UTF-8') as f:
                f.write(existing_config)

            helpers.configure_cassandra_yaml()

            self.assertEqual(write_file.call_count, 2)
            new_config = write_file.call_args[0][1]

            # start_rpc is forced on even though the existing
            # configuration had it off.
            expected_config = dedent('''\
                start_rpc: true
                cluster_name: test_cluster_name
                authenticator: PasswordAuthenticator
                num_tokens: 128
                partitioner: test_partitioner
                listen_address: 10.20.0.1
                rpc_address: 0.0.0.0
                rpc_port: 9160
                native_transport_port: 9042
                storage_port: 7000
                ssl_storage_port: 7001
                authorizer: AllowAllAuthorizer
                seed_provider:
                    - class_name: blah.SimpleSeedProvider
                      parameters:
                          # No whitespace in seeds is important.
                          - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
                endpoint_snitch: GossipingPropertyFileSnitch
                data_file_directories:
                    - /var/lib/cassandra/data
                commitlog_directory: /var/lib/cassandra/commitlog
                saved_caches_directory: /var/lib/cassandra/saved_caches
                compaction_throughput_mb_per_sec: 16
                stream_throughput_outbound_megabits_per_sec: 200
                tombstone_warn_threshold: 1000
                tombstone_failure_threshold: 100000
                ''')
            self.maxDiff = None
            self.assertEqual(yaml.safe_load(new_config),
                             yaml.safe_load(expected_config))

            # Confirm we can use an explicit cluster_name too.
            write_file.reset_mock()
            hookenv.config()['cluster_name'] = 'fubar'
            helpers.configure_cassandra_yaml()
            new_config = write_file.call_args[0][1]
            self.assertEqual(yaml.safe_load(new_config)['cluster_name'],
                             'fubar')

    @patch('helpers.get_cassandra_version')
    @patch('helpers.get_cassandra_yaml_file')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.host.write_file')
    def test_configure_cassandra_yaml(self, write_file, seed_ips,
                                      yaml_file, get_cassandra_version):
        # Cassandra 2.1 additionally gets a broadcast_rpc_address.
        get_cassandra_version.return_value = '2.1'
        hookenv.config().update(dict(num_tokens=128,
                                     cluster_name='test_cluster_name',
                                     partitioner='test_partitioner'))

        seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']

        existing_config = '''
            seed_provider:
                - class_name: blah.SimpleSeedProvider
                  parameters:
                      - seeds: 127.0.0.1 # Comma separated list.
            '''

        with tempfile.TemporaryDirectory() as tmpdir:
            yaml_config = os.path.join(tmpdir, 'c.yaml')
            yaml_file.return_value = yaml_config
            with open(yaml_config, 'w', encoding='UTF-8') as f:
                f.write(existing_config)

            helpers.configure_cassandra_yaml()

            self.assertEqual(write_file.call_count, 2)
            new_config = write_file.call_args[0][1]

            expected_config = dedent('''\
                cluster_name: test_cluster_name
                authenticator: PasswordAuthenticator
                num_tokens: 128
                partitioner: test_partitioner
                listen_address: 10.20.0.1
                rpc_address: 0.0.0.0
                broadcast_rpc_address: 10.30.0.1
                start_rpc: true
                rpc_port: 9160
                native_transport_port: 9042
                storage_port: 7000
                ssl_storage_port: 7001
                authorizer: AllowAllAuthorizer
                seed_provider:
                    - class_name: blah.SimpleSeedProvider
                      parameters:
                          # No whitespace in seeds is important.
                          - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
                endpoint_snitch: GossipingPropertyFileSnitch
                data_file_directories:
                    - /var/lib/cassandra/data
                commitlog_directory: /var/lib/cassandra/commitlog
                saved_caches_directory: /var/lib/cassandra/saved_caches
                compaction_throughput_mb_per_sec: 16
                stream_throughput_outbound_megabits_per_sec: 200
                tombstone_warn_threshold: 1000
                tombstone_failure_threshold: 100000
                ''')
            self.maxDiff = None
            self.assertEqual(yaml.safe_load(new_config),
                             yaml.safe_load(expected_config))

    @patch('helpers.get_cassandra_version')
    @patch('helpers.get_cassandra_yaml_file')
    @patch('helpers.get_seed_ips')
    @patch('charmhelpers.core.host.write_file')
    def test_configure_cassandra_yaml_overrides(self, write_file, seed_ips,
                                                yaml_file, version):
        version.return_value = '2.1'
        hookenv.config().update(dict(num_tokens=128,
                                     cluster_name=None,
                                     partitioner='my_partitioner'))

        seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']

        existing_config = dedent('''\
            seed_provider:
                - class_name: blah.blah.SimpleSeedProvider
                  parameters:
                      - seeds: 127.0.0.1 # Comma separated list.
            ''')
        overrides = dict(partitioner='overridden_partitioner')

        with tempfile.TemporaryDirectory() as tmpdir:
            yaml_config = os.path.join(tmpdir, 'c.yaml')
            yaml_file.return_value = yaml_config
            with open(yaml_config, 'w', encoding='UTF-8') as f:
                f.write(existing_config)

            helpers.configure_cassandra_yaml(overrides=overrides)

            self.assertEqual(write_file.call_count, 2)
            new_config = write_file.call_args[0][1]

            # Explicit overrides beat both charm config and the
            # existing yaml.
            self.assertEqual(yaml.safe_load(new_config)['partitioner'],
                             'overridden_partitioner')

    def test_get_pid_from_file(self):
        # Surrounding whitespace and trailing noise are ignored.
        with tempfile.NamedTemporaryFile('w') as pid_file:
            pid_file.write(' 42\t')
            pid_file.flush()
            self.assertEqual(helpers.get_pid_from_file(pid_file.name), 42)
            pid_file.write('\nSome Noise')
            pid_file.flush()
            self.assertEqual(helpers.get_pid_from_file(pid_file.name), 42)

        # Non-positive and non-numeric pids raise ValueError.
        for invalid_pid in ['-1', '0', 'fred']:
            with self.subTest(invalid_pid=invalid_pid):
                with tempfile.NamedTemporaryFile('w') as pid_file:
                    pid_file.write(invalid_pid)
                    pid_file.flush()
                    self.assertRaises(ValueError,
                                      helpers.get_pid_from_file,
                                      pid_file.name)

        # A missing pid file raises OSError.
        with tempfile.TemporaryDirectory() as tmpdir:
            self.assertRaises(OSError, helpers.get_pid_from_file,
                              os.path.join(tmpdir, 'invalid.pid'))

    @patch('helpers.get_cassandra_pid_file')
    def test_is_cassandra_running_not_running(self, get_pid_file):
        # When Cassandra is not running, the pidfile does not exist.
        get_pid_file.return_value = 'does not exist'
        self.assertFalse(helpers.is_cassandra_running())

    @patch('os.path.exists')
    @patch('helpers.get_pid_from_file')
    def test_is_cassandra_running_invalid_pid(self, get_pid_from_file, exists):
        # get_pid_from_file raises a ValueError if the pid is illegal.
        get_pid_from_file.side_effect = repeat(ValueError('Whoops'))
        exists.return_value = True  # The pid file is there, just insane.

        # is_cassandra_running() fails hard in this case, since we
        # cannot safely continue when the system is insane.
        self.assertRaises(ValueError, helpers.is_cassandra_running)

    @patch('os.kill')
    @patch('os.path.exists')
    @patch('helpers.get_pid_from_file')
    def test_is_cassandra_running_missing_process(self, get_pid_from_file,
                                                  exists, kill):
        # The pid file exists, but the process it names does not.
        get_pid_from_file.return_value = sentinel.pid_file
        exists.return_value = True  # The pid file is there
        kill.side_effect = repeat(ProcessLookupError())  # The process isn't
        self.assertFalse(helpers.is_cassandra_running())

    @patch('os.kill')
    @patch('os.path.exists')
    @patch('helpers.get_pid_from_file')
    def test_is_cassandra_running_wrong_user(self, get_pid_from_file,
                                             exists, kill):
        # The process exists but is owned by another user, so probing it
        # with kill raises PermissionError; this is fatal.
        get_pid_from_file.return_value = sentinel.pid_file
        exists.return_value = True  # The pid file is there
        kill.side_effect = repeat(PermissionError())  # But the process isn't
        self.assertRaises(PermissionError, helpers.is_cassandra_running)

    @patch('time.sleep')
    @patch('os.kill')
    @patch('helpers.get_pid_from_file')
    @patch('subprocess.call')
    def test_is_cassandra_running_starting_up(self, call, get_pid_from_file,
                                              kill, sleep):
        sleep.return_value = None  # Don't actually sleep in unittests.
        # os.kill and subprocess.call are the patched mocks here, so
        # configuring them via the module attributes is equivalent to
        # using the kill/call parameters.
        os.kill.return_value = True  # There is a running pid.
        get_pid_from_file.return_value = 42
        subprocess.call.side_effect = iter([3, 2, 1, 0])  # 4th time the charm
        self.assertTrue(helpers.is_cassandra_running())

    @patch('helpers.backoff')
    @patch('os.kill')
    @patch('subprocess.call')
    @patch('helpers.get_pid_from_file')
    def test_is_cassandra_running_shutting_down(self, get_pid_from_file,
                                                call, kill, backoff):
        # If Cassandra is in the process of shutting down, it might take
        # several failed checks before the pid file disappears.
        backoff.return_value = repeat(True)
        os.kill.return_value = None  # The process is running
        call.return_value = 1  # But nodetool is not succeeding.

        # Fourth time, the pid file is gone.
        get_pid_from_file.side_effect = iter([42, 42, 42,
                                              FileNotFoundError('Whoops')])
        self.assertFalse(helpers.is_cassandra_running())

    @patch('os.kill')
    @patch('subprocess.call')
    @patch('os.path.exists')
    @patch('helpers.get_pid_from_file')
    def test_is_cassandra_running_failsafe(self, get_pid_from_file,
                                           exists, subprocess_call, kill):
        get_pid_from_file.return_value = sentinel.pid_file
        exists.return_value = True  # The pid file is there
        subprocess_call.side_effect = repeat(RuntimeError('whoops'))
        # Weird errors are reraised.
        self.assertRaises(RuntimeError, helpers.is_cassandra_running)

+ @patch('helpers.get_cassandra_version')
+ @patch('helpers.query')
+ def test_get_auth_keyspace_replication(self, query, ver):
+ ver.return_value = '2.2'
+ query.return_value = [('{"json": true}',)]
+ settings = helpers.get_auth_keyspace_replication(sentinel.session)
+ self.assertDictEqual(settings, dict(json=True))
+ query.assert_called_once_with(
+ sentinel.session, dedent('''\
+ SELECT strategy_options FROM system.schema_keyspaces
+ WHERE keyspace_name='system_auth'
+ '''), ConsistencyLevel.QUORUM)
+
+    @patch('helpers.get_cassandra_version')
+    @patch('helpers.query')
+    def test_get_auth_keyspace_replication_30(self, query, ver):
+        """Cassandra 3.0 moves the schema table and returns the replication
+        map already decoded, so no JSON parsing is expected."""
+        ver.return_value = '3.0'
+        query.return_value = [({"json": True},)]  # Decoded under 3.0
+        settings = helpers.get_auth_keyspace_replication(sentinel.session)
+        self.assertDictEqual(settings, dict(json=True))
+        # 3.0 schema lives in system_schema.keyspaces.
+        query.assert_called_once_with(
+            sentinel.session, dedent('''\
+                SELECT replication FROM system_schema.keyspaces
+                WHERE keyspace_name='system_auth'
+                '''), ConsistencyLevel.QUORUM)
+
+    @patch('helpers.status_set')
+    @patch('charmhelpers.core.hookenv.status_get')
+    @patch('helpers.query')
+    def test_set_auth_keyspace_replication(self, query,
+                                           status_get, status_set):
+        """set_auth_keyspace_replication() alters the system_auth keyspace
+        with ConsistencyLevel.ALL, passing the settings map as a parameter."""
+        status_get.return_value = ('active', '')
+        settings = dict(json=True)
+        helpers.set_auth_keyspace_replication(sentinel.session, settings)
+        query.assert_called_once_with(sentinel.session,
+                                      'ALTER KEYSPACE system_auth '
+                                      'WITH REPLICATION = %s',
+                                      ConsistencyLevel.ALL, (settings,))
+
+    @patch('helpers.status_set')
+    @patch('charmhelpers.core.hookenv.status_get')
+    @patch('helpers.nodetool')
+    def test_repair_auth_keyspace(self, nodetool, status_get, status_set):
+        """repair_auth_keyspace() surfaces progress via status_set and runs
+        'nodetool repair system_auth' with a one hour timeout."""
+        status_get.return_value = (sentinel.status, '')
+        helpers.repair_auth_keyspace()
+        # The existing workload status is preserved; only the message changes.
+        status_set.assert_called_once_with(sentinel.status,
+                                           'Repairing system_auth keyspace')
+        # The repair operation may still fail, and I am currently regularly
+        # seeing 'snapshot creation' errors. Repair also takes ages with
+        # Cassandra 2.0. So retry until success, up to 1 hour.
+        nodetool.assert_called_once_with('repair', 'system_auth', timeout=3600)
+
+    def test_is_bootstrapped(self):
+        """The bootstrapped flag is initially unset and sticks once set."""
+        self.assertFalse(helpers.is_bootstrapped())
+        helpers.set_bootstrapped()
+        self.assertTrue(helpers.is_bootstrapped())
+
+    @patch('helpers.get_node_status')
+    def test_is_decommissioned(self, get_node_status):
+        """Both DECOMMISSIONED and LEAVING node states count as
+        decommissioned; NORMAL does not."""
+        get_node_status.return_value = 'DECOMMISSIONED'
+        self.assertTrue(helpers.is_decommissioned())
+        get_node_status.return_value = 'LEAVING'
+        self.assertTrue(helpers.is_decommissioned())
+        get_node_status.return_value = 'NORMAL'
+        self.assertFalse(helpers.is_decommissioned())
+
+    @patch('helpers.nodetool')
+    def test_emit_describe_cluster(self, nodetool):
+        """emit_describe_cluster() is a thin wrapper over nodetool."""
+        helpers.emit_describe_cluster()
+        nodetool.assert_called_once_with('describecluster')
+
+    @patch('helpers.nodetool')
+    def test_emit_status(self, nodetool):
+        """emit_status() is a thin wrapper over nodetool."""
+        helpers.emit_status()
+        nodetool.assert_called_once_with('status')
+
+    @patch('helpers.nodetool')
+    def test_emit_netstats(self, nodetool):
+        """emit_netstats() is a thin wrapper over nodetool."""
+        helpers.emit_netstats()
+        nodetool.assert_called_once_with('netstats')
+
+    def test_week_spread(self):
+        """week_spread(unit) maps unit numbers onto distinct weekly
+        (day, hour, minute) maintenance slots, wrapping after 7168 units."""
+        # The first seven units run midnight on different days.
+        for i in range(0, 7):  # Units 0..6 inclusive.
+            with self.subTest(unit=i):
+                self.assertTupleEqual(helpers.week_spread(i), (i, 0, 0))
+
+        # The next seven units run midday on different days.
+        for i in range(7, 14):
+            with self.subTest(unit=i):
+                self.assertTupleEqual(helpers.week_spread(i), (i - 7, 12, 0))
+
+        # And the next seven units at 6 am on different days.
+        for i in range(14, 21):
+            with self.subTest(unit=i):
+                self.assertTupleEqual(helpers.week_spread(i), (i - 14, 6, 0))
+
+        # This keeps going as best we can, subdividing the hours.
+        self.assertTupleEqual(helpers.week_spread(811), (6, 19, 18))
+
+        # The granularity is 1 minute, so eventually we wrap after about
+        # 7000 units.
+        self.assertTupleEqual(helpers.week_spread(0), (0, 0, 0))
+        for i in range(1, 7168):
+            with self.subTest(unit=i):
+                self.assertNotEqual(helpers.week_spread(i), (0, 0, 0))
+        self.assertTupleEqual(helpers.week_spread(7168), (0, 0, 0))
+
+    def test_local_plugins_dir(self):
+        """Nagios plugins install under the standard local plugins path."""
+        self.assertEqual(helpers.local_plugins_dir(),
+                         '/usr/local/lib/nagios/plugins')
+
+    def test_update_hosts_file_new_entry(self):
+        """A mapping for an unknown hostname is appended to the hosts file,
+        leaving existing entries untouched."""
+        org = dedent("""\
+                     127.0.0.1 localhost
+                     10.0.1.2 existing
+                     """)
+        new = dedent("""\
+                     127.0.0.1 localhost
+                     10.0.1.2 existing
+                     10.0.1.3 newname
+                     """)
+        with tempfile.NamedTemporaryFile(mode='w') as f:
+            f.write(org)
+            f.flush()
+            m = {'10.0.1.3': 'newname'}
+            helpers.update_hosts_file(f.name, m)
+            # NOTE(review): open() handle is never closed; harmless in a
+            # short-lived test but `with open(...)` would be tidier.
+            self.assertEqual(new.strip(), open(f.name, 'r').read().strip())
+
+    def test_update_hosts_file_changed_entry(self):
+        """A mapping for an existing hostname replaces that host's address
+        in place rather than appending a duplicate entry."""
+        org = dedent("""\
+                     127.0.0.1 localhost
+                     10.0.1.2 existing
+                     """)
+        new = dedent("""\
+                     127.0.0.1 localhost
+                     10.0.1.3 existing
+                     """)
+        with tempfile.NamedTemporaryFile(mode='w') as f:
+            f.write(org)
+            f.flush()
+            m = {'10.0.1.3': 'existing'}
+            helpers.update_hosts_file(f.name, m)
+            self.assertEqual(new.strip(), open(f.name, 'r').read().strip())
+
+
+class TestIsLxc(unittest.TestCase):
+    """Smoke test for helpers.is_lxc()."""
+
+    def test_is_lxc(self):
+        # Test the function runs under the current environment.
+        # Unfortunately we can't sanely test that it is returning the
+        # correct value
+        helpers.is_lxc()
+
+
+# Allow running this test module directly, outside a test runner.
+if __name__ == '__main__':
+    unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_integration.py b/charms/trusty/cassandra/tests/test_integration.py
new file mode 100755
index 0000000..8d91bce
--- /dev/null
+++ b/charms/trusty/cassandra/tests/test_integration.py
@@ -0,0 +1,620 @@
+#!.venv3/bin/python3
+#
+# Copyright 2015 Canonical Ltd.
+#
+# This file is part of the Cassandra Charm for Juju.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranties of
+# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import configparser
+from functools import wraps
+import glob
+import http.server
+from itertools import count
+import logging
+import multiprocessing
+import os
+import socket
+import subprocess
+import sys
+import time
+import unittest
+import uuid
+import warnings
+
+warnings.filterwarnings('ignore', 'The blist library is not available')
+
+import amulet.deployer
+import amulet.helpers
+from cassandra import Unavailable, ConsistencyLevel, AuthenticationFailed
+from cassandra.auth import PlainTextAuthProvider
+from cassandra.cluster import Cluster, NoHostAvailable
+from cassandra.query import SimpleStatement
+import yaml
+
+import helpers
+from testing.amuletfixture import AmuletFixture
+
+
+SERIES = os.environ.get('SERIES', 'trusty')  # Ubuntu series under test.
+
+WAIT_TIMEOUT = int(os.environ.get('AMULET_TIMEOUT', 3600))  # seconds
+
+# Charm root directory (parent of this tests/ directory).
+ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
+
+
+class TestDeploymentBase(unittest.TestCase):
+    """Shared fixture: deploys Cassandra plus stub client, storage and
+    nrpe charms once per TestCase subclass."""
+    rf = 1  # Replication factor == number of cassandra units deployed.
+    deployment = None
+
+    # Tiny JVM heap so the cluster fits on small test instances.
+    common_config = dict(max_heap_size='96M',
+                         heap_newsize='4M')
+    test_config = dict()  # Overridden by subclasses.
+
+    @classmethod
+    def setUpClass(cls):
+        """Deploy the full test topology once for all tests in the class."""
+        deployment = AmuletFixture(series=SERIES)
+        deployment.setUp()
+        cls.deployment = deployment
+
+        deployment.add('cassandra', units=cls.rf,
+                       constraints=dict(mem="2G"))
+        deployment.expose('cassandra')  # Tests need client access.
+        config = dict()
+        config.update(cls.common_config)
+        config.update(cls.test_config)  # Test subclass overrides
+        deployment.configure('cassandra', config)
+
+        # External block storage provider, used by test_external_mount.
+        deployment.add('storage',
+                       'cs:~stub/{}/storage'.format(SERIES))
+        deployment.configure('storage', dict(provider='local'))
+
+        # A stub client charm.
+        empty_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                  os.pardir, 'lib',
+                                                  'testcharms', 'empty'))
+        deployment.add('client', empty_path)
+        deployment.relate('cassandra:database', 'client:database')
+        deployment.relate('cassandra:database-admin', 'client:database-admin')
+
+        # No official trusty branch of the nrpe-external-master charm, yet.
+        # This is a problem as it means tests may not be running against
+        # the latest version.
+        deployment.add('nrpe',
+                       'cs:~stub/{}/nrpe-external-master'
+                       ''.format(SERIES))
+        deployment.relate('cassandra:nrpe-external-master',
+                          'nrpe:nrpe-external-master')
+
+        deployment.deploy(timeout=WAIT_TIMEOUT)
+
+        # Silence noise - we are testing the charm, not the Cassandra
+        # driver.
+        cassandra_log = logging.getLogger('cassandra')
+        cassandra_log.setLevel(logging.CRITICAL)
+
+    @classmethod
+    def tearDownClass(cls):
+        """Destroy the Juju environment created in setUpClass."""
+        cls.deployment.tearDown()
+        cls.deployment = None
+
+    def juju_status(self):
+        """Return the parsed 'juju status' output, or None if empty."""
+        status_yaml = subprocess.check_output(['juju', 'status',
+                                               '--format=yaml'])
+        if not status_yaml.strip():
+            return None
+        return yaml.safe_load(status_yaml)
+
+    def cluster(self, username=None, password=None, hosts=None, port=9042):
+        """Return a driver Cluster connected to the deployed service.
+
+        Credentials default to the first unit's superuser account (read
+        from its cqlshrc); hosts default to all units' public addresses.
+        The cluster is shut down automatically at test cleanup.
+        """
+        status = self.juju_status()
+
+        if username is None or password is None:
+            # Get some valid credentials - unit's superuser account will do.
+            unit = sorted(status['services']['cassandra']['units'].keys())[0]
+            cqlshrc_path = helpers.get_cqlshrc_path()
+            cqlshrc = configparser.ConfigParser(interpolation=None)
+            cqlshrc.read_string(
+                self.deployment.sentry[unit].file_contents(cqlshrc_path))
+            username = cqlshrc['authentication']['username']
+            password = cqlshrc['authentication']['password']
+
+        auth_provider = PlainTextAuthProvider(username=username,
+                                              password=password)
+
+        if hosts is None:
+            # Get the IP addresses
+            hosts = []
+            for unit, d in status['services']['cassandra']['units'].items():
+                hosts.append(d['public-address'])
+        cluster = Cluster(hosts, auth_provider=auth_provider, port=port)
+        self.addCleanup(cluster.shutdown)
+        return cluster
+
+    def session(self):
+        '''A session using the server's superuser credentials.'''
+        session = self.cluster().connect()
+        self.addCleanup(session.shutdown)
+        return session
+
+    def client_session(self, relname):
+        '''A session using the client's credentials.
+
+        We currently just steal the client's credentials and use
+        them from the local machine, but we could tunnel through the
+        client with a little more effort.
+        '''
+        relinfo = self.get_client_relinfo(relname)
+        # 'host' is only published once the relation is fully set up.
+        self.assertIn('host', relinfo.keys())
+        cluster = self.cluster(relinfo['username'],
+                               relinfo['password'],
+                               [relinfo['host']],
+                               int(relinfo['native_transport_port']))
+        session = cluster.connect()
+        self.addCleanup(session.shutdown)
+        return session
+
+    # Class-level counter so every test gets a uniquely named keyspace.
+    keyspace_ids = count()
+
+    def new_keyspace(self, session, rf=None):
+        """Create and switch to a fresh keyspace; return its name."""
+        if rf is None:
+            # We create a keyspace with a replication factor equal
+            # to the number of units. This ensures that all records
+            # are replicated to all nodes, and we can confirm that
+            # all nodes are working by doing an insert with
+            # ConsistencyLevel.ALL.
+            rf = self.rf
+        keyspace = 'test{}'.format(next(TestDeploymentBase.keyspace_ids))
+        q = SimpleStatement(
+            'CREATE KEYSPACE {} WITH REPLICATION ='.format(keyspace) +
+            "{'class': 'SimpleStrategy', 'replication_factor': %s}",
+            consistency_level=ConsistencyLevel.ALL)
+        session.execute(q, (rf,))
+        session.set_keyspace(keyspace)
+        return keyspace
+
+    def get_client_relinfo(self, relname):
+        """Return the relation data the first cassandra unit published."""
+        # We only need one unit, even if rf > 1
+        s = self.deployment.sentry['cassandra'][0]
+        relinfo = s.relation(relname, 'client:{}'.format(relname))
+        return relinfo
+
+    def is_port_open(self, port):
+        """True if `port` on the first unit accepts TCP connections
+        (probed from the test host with netcat)."""
+        status = self.juju_status()
+        detail = list(status['services']['cassandra']['units'].values())[0]
+        address = detail['public-address']
+        rc = subprocess.call(['nc', '-z', '-w', '2', address, str(port)])
+        return rc == 0
+
+    def reconfigure_cassandra(self, **overrides):
+        """Apply config overrides on top of the class defaults and wait
+        for the deployment to settle."""
+        config = dict()
+        config.update(self.common_config)
+        config.update(self.test_config)
+        config.update(overrides)
+        self.deployment.configure('cassandra', config)
+        self.deployment.wait()
+
+
+class Test1UnitDeployment(TestDeploymentBase):
+    """Tests run on both a single node cluster and a 3 node cluster."""
+    rf = 1
+    test_config = dict(jre='openjdk')  # The freely available JRE.
+
+    def test_basics_unit_superuser(self):
+        # Basic tests using unit superuser credentials
+        session = self.session()
+        self.new_keyspace(session)
+        self._test_database_basics(session)
+
+    def test_basics_client_relation(self):
+        # Create a keyspace using superuser credentials, since the
+        # standard client account cannot create keyspaces itself.
+        super_session = self.session()
+        keyspace = self.new_keyspace(super_session)
+
+        # Basic tests using standard client relation credentials.
+        session = self.client_session('database')
+        session.set_keyspace(keyspace)
+        self._test_database_basics(session)
+
+    def test_basics_client_admin_relation(self):
+        # Basic tests using administrative client relation credentials.
+        # Admin credentials can create their own keyspace.
+        session = self.client_session('database-admin')
+        self.new_keyspace(session)
+        self._test_database_basics(session)
+
+    def _test_database_basics(self, session):
+        """Create a table, insert at CL.ALL (retrying while the cluster
+        settles), and read the row back."""
+        session.execute('CREATE TABLE Foo (x varchar PRIMARY KEY)')
+
+        # Insert some data, ensuring that it has been stored on
+        # all of our juju units. Note that the replication factor
+        # of our keyspace has been set to the number of units we
+        # deployed. Because it might take a while for the cluster to get
+        # its act together, we retry this in a loop with a timeout.
+        timeout = time.time() + 120
+        while True:
+            value = 'hello {}'.format(time.time())
+            query = SimpleStatement(
+                "INSERT INTO Foo (x) VALUES (%s)",
+                consistency_level=ConsistencyLevel.ALL)
+            try:
+                session.execute(query, (value,))
+                break
+            except Exception:
+                # Keep retrying until the deadline, then let it propagate.
+                if time.time() > timeout:
+                    raise
+
+        # We can get the data out again. This isn't testing our charm,
+        # but nice to know anyway...
+        r = session.execute('SELECT * FROM Foo LIMIT 1')
+        self.assertTrue(r[0].x.startswith('hello'))
+
+ def test_external_mount(self):
+ # Not only does this test migrating data from local disk to an
+ # external mount, it also exercises the rolling restart logic.
+ # If rf==1, the restart will happen in the
+ # storage-relation-changed hook as soon as the mount is ready.
+ # If rf > 1, the restart will happen in the
+ # cluster-relation-changed hook once the unit has determined
+ # that it is its turn to restart.
+
+ # First, create a keyspace pre-migration so we can confirm the
+ # data was migrated rather than being reset to an empty system.
+ session = self.session()
+ keyspace = self.new_keyspace(session)
+ session.execute('CREATE TABLE dat (x varchar PRIMARY KEY)')
+ total = self.rf * 50
+ q = SimpleStatement('INSERT INTO dat (x) VALUES (%s)')
+ for _ in range(0, total):
+ session.execute(q, (str(uuid.uuid1()),))
+ session.shutdown()
+
+ self.deployment.relate('cassandra:data', 'storage:data')
+ self.deployment.wait()
+ # Per Bug #1254766 and Bug #1254766, the sentry.wait() above
+ # will return before the hooks have actually finished running
+ # and data migrated. Instead, keep checking until our condition
+ # is met, or a timeout reached.
+ timeout = time.time() + 300
+ for s in self.deployment.sentry['cassandra']:
+ unit = s.info['unit_name']
+ unit_num = s.info['unit']
+ with self.subTest(unit=unit):
+ while True:
+ # Attempting to diagnose Amulet failures. I suspect
+ # SSH host keys again, per Bug #802117
+ try:
+ s.directory_contents('/')
+ except (subprocess.CalledProcessError, OSError):
+ self.skipTest('sentry[{!r}].directory_contents({!r}) '
+ 'failed!'.format(unit, '/'))
+ parents = ['/srv', '/srv/cassandra_{}'.format(unit_num),
+ '/srv/cassandra_{}/cassandra'.format(unit_num)]
+ for path in parents:
+ try:
+ s.directory_contents('/srv')
+ except (subprocess.CalledProcessError, OSError):
+ raise AssertionError('Failed to scan {!r} on {}'
+ .format(path, unit))
+ try:
+ contents = s.directory_contents(
+ '/srv/cassandra_{}/cassandra/data'.format(
+ unit_num))
+ found = set(contents['directories'])
+ self.assertIn(keyspace, found)
+ self.assertIn('system', found)
+ break
+ except Exception:
+ if time.time() > timeout:
+ raise
+ time.sleep(5)
+
+ # Confirm no data has been lost, which could happen if we badly
+ # shutdown and memtables were not flushed.
+ session = self.session()
+ session.set_keyspace(keyspace)
+ q = SimpleStatement('SELECT COUNT(*) FROM dat',
+ consistency_level=ConsistencyLevel.QUORUM)
+ results = session.execute(q)
+ self.assertEqual(results[0][0], total)
+
+    def test_cluster_ports_closed(self):
+        # The internal Cassandra ports are protected by ufw firewall
+        # rules, and are closed to everyone except for peers and the
+        # force_seed_nodes list. This is required for security, since
+        # the protocols are unauthenticated. It also stops rogue nodes
+        # on failed units from rejoining the cluster and causing chaos.
+        self.assertFalse(self.is_port_open(7000), 'Storage port open')
+        self.assertFalse(self.is_port_open(7001), 'SSL Storage port open')
+        self.assertFalse(self.is_port_open(7199), 'JMX port open')
+
+    def test_client_ports_open(self):
+        # The authenticated client protocols must remain reachable.
+        self.assertTrue(self.is_port_open(9042), 'Native trans port closed')
+        self.assertTrue(self.is_port_open(9160), 'Thrift RPC port closed')
+
+    def test_default_superuser_account_closed(self):
+        """The well-known cassandra/cassandra account must be disabled;
+        every contact point should refuse authentication."""
+        cluster = self.cluster(username='cassandra', password='cassandra')
+        try:
+            cluster.connect()
+            self.fail('Default credentials not reset')
+        except NoHostAvailable as x:
+            for fail in x.errors.values():
+                self.assertIsInstance(fail, AuthenticationFailed)
+
+    def test_cqlsh(self):
+        """cqlsh on the unit works out of the box for root (credentials
+        come from the installed cqlshrc)."""
+        unit = self.deployment.sentry['cassandra'][0].info['unit_name']
+        subprocess.check_output(['juju', 'ssh', unit,
+                                 'sudo -H cqlsh -e exit'],
+                                stderr=subprocess.STDOUT)
+
+    def test_z_add_and_drop_node(self):  # 'z' to run this test last.
+        # We need to be able to add a node correctly into the ring,
+        # without an operator needing to repair keyspaces to ensure data
+        # is located on the expected nodes.
+        # To test this, first create a keyspace with rf==1 and put enough
+        # data in it so each node will have some.
+        cluster = self.cluster()
+        s = cluster.connect()
+        keyspace = self.new_keyspace(s, rf=1)
+        q = SimpleStatement('CREATE TABLE dat (x varchar PRIMARY KEY)',
+                            consistency_level=ConsistencyLevel.ALL)
+        s.execute(q)
+
+        total = self.rf * 50
+        q = SimpleStatement('INSERT INTO dat (x) VALUES (%s)',
+                            consistency_level=ConsistencyLevel.QUORUM)
+        for _ in range(0, total):
+            s.execute(q, (str(uuid.uuid1()),))
+        cluster.shutdown()
+
+        def count(expected):
+            # Poll until the row count matches `expected` or 3 minutes
+            # elapse; returns the last observed count either way.
+            until = time.time() + 180
+            while True:
+                cluster = self.cluster()
+                try:
+                    s = cluster.connect(keyspace)
+                    results = s.execute(SimpleStatement(
+                        'SELECT count(*) FROM dat',
+                        consistency_level=ConsistencyLevel.QUORUM))
+                    found = results[0][0]
+                    if found == expected or time.time() > until:
+                        return found
+                    time.sleep(0.2)
+                except Unavailable:
+                    if time.time() > until:
+                        raise
+                finally:
+                    cluster.shutdown()
+
+        self.assertEqual(count(total), total)
+
+        self.deployment.add_unit('cassandra')
+        self.deployment.wait()
+        status = self.juju_status()
+        unit = sorted(status['services']['cassandra']['units'].keys())[-1]
+        try:
+            self.assertEqual(count(total), total)
+        finally:
+            # When a node is dropped, it needs to decommission itself and
+            # move its data to the remaining nodes so no data is lost.
+            # Alas, per Bug #1417874 we can't yet do this with Juju.
+            # First, the node must be manually decommissioned before we
+            # remove the unit.
+            self._decommission(unit)
+            self.deployment.remove_unit(unit)
+            self.deployment.wait()
+
+        self.assertEqual(count(total), total)
+
+    def _decommission(self, unit):
+        """Run 'nodetool decommission' on `unit` and block until netstats
+        reports DECOMMISSIONED, retrying transient juju-run failures."""
+        until = time.time() + WAIT_TIMEOUT
+        while True:
+            try:
+                subprocess.check_output(['juju', 'run', '--unit', unit,
+                                         'nodetool decommission'],
+                                        stderr=subprocess.STDOUT,
+                                        universal_newlines=True)
+                break
+            except subprocess.CalledProcessError:
+                if time.time() > until:
+                    raise
+
+        # Decommission is asynchronous; poll netstats for completion.
+        until = time.time() + WAIT_TIMEOUT
+        while True:
+            try:
+                cmd = ['juju', 'run', '--unit', unit, 'nodetool netstats']
+                raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
+                                              universal_newlines=True)
+                if 'Mode: DECOMMISSIONED' in raw:
+                    return
+                if time.time() > until:
+                    raise subprocess.TimeoutExpired(cmd, WAIT_TIMEOUT, raw)
+            except subprocess.CalledProcessError:
+                if time.time() > until:
+                    raise
+            time.sleep(3)
+
+
+class Test3UnitDeployment(Test1UnitDeployment):
+    """Tests run on a three node cluster."""
+    rf = 3  # Same tests as Test1UnitDeployment, but with replication.
+
+
+# Cached URL of the locally served Oracle JRE tarball (see get_jre_url).
+_jre_url = None
+
+
+def _serve(cwd, host, port):
+    """Child-process entry point: serve `cwd` over plain HTTP forever."""
+    sys.stderr = open('/dev/null', 'w')  # Silence request logging.
+    os.chdir(cwd)
+    httpd = http.server.HTTPServer((host, port),
+                                   http.server.SimpleHTTPRequestHandler)
+    httpd.serve_forever()
+
+
+# Keep references to spawned server processes so they are not reaped early.
+_procs = []
+
+
+def get_jre_url():
+    '''Return the URL to the Oracle Java SE 8 Server Runtime tarball, or None.
+
+    The tarball needs to be placed in ../lib.
+
+    Spawns a web server as a subprocess to serve the file.
+    '''
+    global _jre_url
+    if _jre_url is not None:
+        return _jre_url
+
+    jre_dir = os.path.join(ROOT, 'lib')
+
+    jre_tarballs = sorted(glob.glob(os.path.join(jre_dir,
+                                                 'server-jre-?u*.tar.gz')))
+    if not jre_tarballs:
+        return None
+
+    # Get the local IP address, only available via hackish means and
+    # quite possibly incorrect.
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.connect(('www.canonical.com', 80))
+    host = s.getsockname()[0]
+    s.close()
+
+    # Get a free port.
+    # NOTE(review): this binds a UDP (SOCK_DGRAM) socket, but the HTTP
+    # server below listens on TCP — a free UDP port does not guarantee
+    # the matching TCP port is free. Confirm whether SOCK_STREAM was
+    # intended.
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    s.bind((host, 0))
+    port = s.getsockname()[1]
+    s.close()
+
+    p = multiprocessing.Process(target=_serve, args=(jre_dir, host, port),
+                                daemon=True)
+    p.start()
+    _procs.append(p)
+
+    _jre_url = 'http://{}:{}/{}'.format(host, port,
+                                        os.path.basename(jre_tarballs[-1]))
+    return _jre_url
+
+
+class TestOracleJREDeployment(Test1UnitDeployment):
+    """Basic test with the Oracle JRE.
+
+    Unfortunately these tests cannot be run by the automatic test runners,
+    as the Oracle JRE is protected from public download by Oracle's
+    click-through license agreement.
+    """
+    rf = 1
+    test_config = dict(jre='Oracle', edition='community',
+                       private_jre_url=get_jre_url())
+
+    # Skip the whole class when no local JRE tarball is available.
+    @classmethod
+    @unittest.skipUnless(get_jre_url(), 'No Oracle JRE tarballs available')
+    def setUpClass(cls):
+        super(TestOracleJREDeployment, cls).setUpClass()
+
+
+class TestDSEDeployment(Test1UnitDeployment):
+    """Tests run a single node DataStax Enterprise cluster.
+
+    Unfortunately these tests cannot be run by the automatic test
+    runners, as the DSE packages are not available for public download.
+    """
+    rf = 1
+    test_config = dict(
+        edition='DSE',  # Forces Oracle JRE
+        install_sources=yaml.safe_dump([os.environ.get('DSE_SOURCE'),
+                                        'ppa:stub/cassandra']),
+        install_keys=yaml.safe_dump([None, None]),
+        private_jre_url=get_jre_url())
+
+    # Requires both a local JRE tarball and a DSE_SOURCE repository URL.
+    @classmethod
+    @unittest.skipUnless(get_jre_url(), 'No Oracle JRE tarballs available')
+    @unittest.skipIf('DSE_SOURCE' not in os.environ,
+                     'DSE_SOURCE environment variable not configured')
+    def setUpClass(cls):
+        super(TestDSEDeployment, cls).setUpClass()
+
+
+class TestAllowAllAuthenticatorDeployment(Test3UnitDeployment):
+    """Re-run the 3-node suite with authentication disabled: any
+    credentials, valid or not, must be accepted."""
+    test_config = dict(authenticator='AllowAllAuthenticator')
+
+    def cluster(self, username=None, password=None, hosts=None, port=9042):
+        '''A cluster using invalid credentials.'''
+        return super(TestAllowAllAuthenticatorDeployment,
+                     self).cluster(username='wat', password='eva')
+
+    def client_session(self, relname):
+        '''A session using invalid credentials.'''
+        relinfo = self.get_client_relinfo(relname)
+        self.assertIn('host', relinfo.keys())
+        cluster = self.cluster('random', 'nonsense',
+                               [relinfo['host']],
+                               int(relinfo['native_transport_port']))
+        session = cluster.connect()
+        self.addCleanup(session.shutdown)
+        return session
+
+    # Disable the inherited test: there is no default superuser account
+    # to lock down when authentication is off.
+    test_default_superuser_account_closed = None
+
+
+class Test20Deployment(Test1UnitDeployment):
+    """Tests run on a single node Apache Cassandra 2.0 cluster.
+    """
+    rf = 1
+    # Pin the apt sources to the Cassandra 2.0 ('20x') series.
+    test_config = dict(
+        edition='community',
+        install_sources=yaml.safe_dump([
+            'ppa:stub/cassandra',
+            'ppa:openjdk-r/ppa',
+            'deb http://www.apache.org/dist/cassandra/debian 20x main']),
+        install_keys=yaml.safe_dump([None, None, None]))
+
+
+class Test21Deployment(Test1UnitDeployment):
+    """Tests run on a single node Apache Cassandra 2.1 cluster.
+    """
+    rf = 1
+    # Pin the apt sources to the Cassandra 2.1 ('21x') series.
+    test_config = dict(
+        edition='community',
+        install_sources=yaml.safe_dump([
+            'ppa:stub/cassandra',
+            'ppa:openjdk-r/ppa',
+            'deb http://www.apache.org/dist/cassandra/debian 21x main']),
+        install_keys=yaml.safe_dump([None, None, None]))
+
+
+class Test30Deployment(Test1UnitDeployment):
+    """Tests run on a single node Apache Cassandra 3.0 cluster.
+    """
+    rf = 1
+    # Pin the apt sources to the Cassandra 3.0 ('30x') series.
+    test_config = dict(
+        edition='community',
+        install_sources=yaml.safe_dump([
+            'ppa:stub/cassandra',
+            'ppa:openjdk-r/ppa',
+            'deb http://www.apache.org/dist/cassandra/debian 30x main']),
+        install_keys=yaml.safe_dump([None, None, None]))
+
+
+# Bug #1417097 means we need to monkey patch Amulet for now.
+real_juju = amulet.helpers.juju
+
+
+@wraps(real_juju)
+def patched_juju(args, env=None):
+    """Wrapper forcing all juju CLI arguments to strings before dispatch."""
+    args = [str(a) for a in args]
+    return real_juju(args, env)
+
+# Install the patched entry point in both amulet modules that call juju.
+amulet.helpers.juju = patched_juju
+amulet.deployer.juju = patched_juju
+
+
+# Allow running the integration suite directly, outside a test runner.
+if __name__ == '__main__':
+    unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/tests.yaml b/charms/trusty/cassandra/tests/tests.yaml
new file mode 100644
index 0000000..fbbd7f0
--- /dev/null
+++ b/charms/trusty/cassandra/tests/tests.yaml
@@ -0,0 +1,15 @@
+# Charm test-runner (bundletester) configuration for the cassandra charm.
+bootstrap: true
+reset: false
+tests: ""
+virtualenv: false
+# sources: []
+# packages: []
+# Make targets executed in order by the test runner.
+makefile:
+    - lint
+    - unittest
+    - Test1UnitDeployment
+    - Test3UnitDeployment
+    - Test20Deployment
+    - Test21Deployment
+    - Test30Deployment
+    - TestAllowAllAuthenticatorDeployment