aboutsummaryrefslogtreecommitdiffstats
path: root/charms/trusty/contrail-analytics
diff options
context:
space:
mode:
authorStuart Mackie <wsmackie@juniper.net>2016-10-07 12:24:58 -0700
committerStuart Mackie <wsmackie@juniper.net>2016-10-07 12:24:58 -0700
commit4faa7f927149a5c4ef7a03523f7bc14523cb9baa (patch)
tree0be55aa0809cc395e45baeae63db660b4e72fe83 /charms/trusty/contrail-analytics
parent82f1a7eb5535b30a95b1e71ff18c315d40d1e6f0 (diff)
Charms for Contrail 3.1 with Mitaka
Change-Id: Id37f3b9743d1974e31fcd7cd9c54be41bb0c47fb Signed-off-by: Stuart Mackie <wsmackie@juniper.net>
Diffstat (limited to 'charms/trusty/contrail-analytics')
-rw-r--r--charms/trusty/contrail-analytics/.bzrignore1
-rw-r--r--charms/trusty/contrail-analytics/Makefile10
-rw-r--r--charms/trusty/contrail-analytics/README.md49
-rw-r--r--charms/trusty/contrail-analytics/charm-helpers-sync.yaml9
-rw-r--r--charms/trusty/contrail-analytics/config.yaml34
-rw-r--r--charms/trusty/contrail-analytics/copyright17
-rw-r--r--charms/trusty/contrail-analytics/files/contrail5
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-analytics-api.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr6
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-collector.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini6
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-topology.ini13
-rwxr-xr-xcharms/trusty/contrail-analytics/files/ntpq-nodemgr10
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-departed1
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py456
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py96
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py197
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py963
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py1416
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh32
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py151
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py356
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg58
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend24
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf24
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken9
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py926
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py56
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py121
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py58
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py34
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py657
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py78
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/contrail-analytics/hooks/config-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed1
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py305
-rw-r--r--charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py318
l---------charms/trusty/contrail-analytics/hooks/http-services-relation-joined1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/install1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/start1
l---------charms/trusty/contrail-analytics/hooks/stop1
l---------charms/trusty/contrail-analytics/hooks/upgrade-charm1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed1
-rw-r--r--charms/trusty/contrail-analytics/icon.svg309
-rw-r--r--charms/trusty/contrail-analytics/metadata.yaml28
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf22
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf26
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf9
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-collector.conf31
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf11
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-query-engine.conf24
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf28
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-topology.conf19
-rw-r--r--charms/trusty/contrail-analytics/templates/vnc_api_lib.ini16
111 files changed, 12251 insertions, 0 deletions
diff --git a/charms/trusty/contrail-analytics/.bzrignore b/charms/trusty/contrail-analytics/.bzrignore
new file mode 100644
index 0000000..ba077a4
--- /dev/null
+++ b/charms/trusty/contrail-analytics/.bzrignore
@@ -0,0 +1 @@
+bin
diff --git a/charms/trusty/contrail-analytics/Makefile b/charms/trusty/contrail-analytics/Makefile
new file mode 100644
index 0000000..378713f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/Makefile
@@ -0,0 +1,10 @@
+#!/usr/bin/make
+PYTHON := /usr/bin/env python
+
+bin/charm_helpers_sync.py:
+ @mkdir -p bin
+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+ > bin/charm_helpers_sync.py
+
+sync: bin/charm_helpers_sync.py
+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/contrail-analytics/README.md b/charms/trusty/contrail-analytics/README.md
new file mode 100644
index 0000000..0cfa491
--- /dev/null
+++ b/charms/trusty/contrail-analytics/README.md
@@ -0,0 +1,49 @@
+Overview
+--------
+
+OpenContrail (www.opencontrail.org) is a fully featured Software Defined
+Networking (SDN) solution for private clouds. It supports high performance
+isolated tenant networks without requiring external hardware support. It
+provides a Neutron plugin to integrate with OpenStack.
+
+This charm is designed to be used in conjunction with the rest of the OpenStack
+related charms in the charm store to virtualize the network that Nova Compute
+instances plug into.
+
+This charm provides the analytics node component which includes
+contrail-collector, contrail-query-engine and contrail-analytics-api services.
+Only OpenStack Icehouse or newer is supported.
+
+Usage
+-----
+
+Cassandra and Contrail Configuration are prerequisite services to deploy.
+Once ready, deploy and relate as follows:
+
+ juju deploy contrail-analytics
+ juju add-relation contrail-analytics:cassandra cassandra:database
+ juju add-relation contrail-analytics contrail-configuration
+
+Install Sources
+---------------
+
+The version of OpenContrail installed when deploying can be changed using the
+'install-sources' option. This is a multilined value that may refer to PPAs or
+Deb repositories.
+
+The version of dependent OpenStack components installed when deploying can be
+changed using the 'openstack-origin' option. When deploying to different
+OpenStack versions, openstack-origin needs to be set across all OpenStack and
+OpenContrail charms where available.
+
+High Availability (HA)
+----------------------
+
+Multiple units of this charm can be deployed to support HA deployments:
+
+ juju add-unit contrail-analytics
+
+Relating to haproxy charm (http-services relation) allows multiple units to be
+load balanced:
+
+ juju add-relation contrail-analytics haproxy
diff --git a/charms/trusty/contrail-analytics/charm-helpers-sync.yaml b/charms/trusty/contrail-analytics/charm-helpers-sync.yaml
new file mode 100644
index 0000000..eadff82
--- /dev/null
+++ b/charms/trusty/contrail-analytics/charm-helpers-sync.yaml
@@ -0,0 +1,9 @@
+branch: lp:charm-helpers
+destination: hooks/charmhelpers
+include:
+ - core
+ - fetch
+ - contrib.network
+ - contrib.openstack|inc=*
+ - contrib.python
+ - contrib.storage
diff --git a/charms/trusty/contrail-analytics/config.yaml b/charms/trusty/contrail-analytics/config.yaml
new file mode 100644
index 0000000..b92d33e
--- /dev/null
+++ b/charms/trusty/contrail-analytics/config.yaml
@@ -0,0 +1,34 @@
+options:
+ install-sources:
+ type: string
+ default: |
+ - "ppa:opencontrail/ppa"
+ - "ppa:opencontrail/r2.20"
+ description: Package sources for install
+ install-keys:
+ type: string
+ description: Apt keys for package install sources
+ openstack-origin:
+ type: string
+ default: distro
+ description: |
+ Repository from which to install. May be one of the following:
+ distro (default), ppa:somecustom/ppa, a deb url sources entry,
+ or a supported Cloud Archive release pocket.
+
+ Supported Cloud Archive sources include: cloud:precise-folsom,
+ cloud:precise-folsom/updates, cloud:precise-folsom/staging,
+ cloud:precise-folsom/proposed.
+ vip:
+ type: string
+ description: |
+ Virtual IP address to use when services are related in a High Availability
+ configuration.
+ cassandra-units:
+ type: int
+ default: 1
+ description: Minimum number of units required in cassandra relation
+ kafka-units:
+ type: int
+ default: 1
+ description: Minimum number of units required in kafka relation
diff --git a/charms/trusty/contrail-analytics/copyright b/charms/trusty/contrail-analytics/copyright
new file mode 100644
index 0000000..567db82
--- /dev/null
+++ b/charms/trusty/contrail-analytics/copyright
@@ -0,0 +1,17 @@
+Format: http://dep.debian.net/deps/dep5/
+
+Files: *
+Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
+License: GPL-3
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/files/contrail b/charms/trusty/contrail-analytics/files/contrail
new file mode 100644
index 0000000..39e20cc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail
@@ -0,0 +1,5 @@
+Explanation: Use contrail version of the package.
+Package: python-redis
+Pin: version /contrail/
+Pin-Priority: 1001
+
diff --git a/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini b/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini
new file mode 100644
index 0000000..aa3ab8a
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini
@@ -0,0 +1,13 @@
+[program:contrail-alarm-gen]
+command=/usr/bin/contrail-alarm-gen --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-alarm-gen.conf
+priority=440
+autostart=true
+killasgroup=true
+stopsignal=KILL
+stdout_capture_maxbytes=1MB
+redirect_stderr=true
+stdout_logfile=/var/log/contrail/contrail-alarm-gen-stdout.log
+stderr_logfile=/var/log/contrail/contrail-alarm-gen-stderr.log
+startsecs=5
+exitcodes=0 ; 'expected' exit codes for process (default 0,2)
+user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini b/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini
new file mode 100644
index 0000000..3f76d94
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini
@@ -0,0 +1,13 @@
+[program:contrail-analytics-api]
+command=/usr/bin/contrail-analytics-api --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-analytics-api.conf
+priority=440
+autostart=true
+killasgroup=true
+stopsignal=KILL
+stdout_capture_maxbytes=1MB
+redirect_stderr=true
+stdout_logfile=/var/log/contrail/contrail-analytics-api-stdout.log
+stderr_logfile=/var/log/contrail/contrail-analytics-api-stderr.log
+startsecs=5
+exitcodes=0 ; 'expected' exit codes for process (default 0,2)
+user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr b/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr
new file mode 100644
index 0000000..a88aafa
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+# chkconfig: 2345 99 01
+# description: Juniper Network Analytics Node Manager
+
+supervisorctl -s unix:///tmp/supervisord_analytics.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-analytics/files/contrail-collector.ini b/charms/trusty/contrail-analytics/files/contrail-collector.ini
new file mode 100644
index 0000000..17a5617
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-collector.ini
@@ -0,0 +1,13 @@
+[program:contrail-collector]
+command=/usr/bin/contrail-collector --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-collector.conf
+priority=420
+autostart=true
+killasgroup=true
+stopsignal=KILL
+stdout_capture_maxbytes=1MB
+redirect_stderr=true
+stdout_logfile=/var/log/contrail/contrail-collector-stdout.log
+stderr_logfile=/dev/null
+startsecs=5
+exitcodes=0 ; 'expected' exit codes for process (default 0,2)
+user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini b/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini
new file mode 100644
index 0000000..5aa2873
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini
@@ -0,0 +1,6 @@
+[eventlistener:contrail-analytics-nodemgr]
+command=/bin/bash -c "exec /usr/bin/contrail-nodemgr"
+events=PROCESS_COMMUNICATION,PROCESS_STATE,TICK_60
+buffer_size=10000
+stdout_logfile=/var/log/contrail/contrail-analytics-nodemgr-stdout.log
+stderr_logfile=/var/log/contrail/contrail-analytics-nodemgr-stderr.log
diff --git a/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini b/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini
new file mode 100644
index 0000000..5f28ac5
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini
@@ -0,0 +1,13 @@
+[program:contrail-snmp-collector]
+command=/usr/bin/contrail-snmp-collector --conf_file /etc/contrail/contrail-snmp-collector.conf --conf_file /etc/contrail/contrail-keystone-auth.conf
+priority=340
+autostart=true
+killasgroup=true
+stopsignal=KILL
+stdout_capture_maxbytes=1MB
+redirect_stderr=true
+stdout_logfile=/var/log/contrail/contrail-snmp-collector-stdout.log
+stderr_logfile=/var/log/contrail/contrail-snmp-collector-stderr.log
+startsecs=5
+exitcodes=0 ; 'expected' exit codes for process (default 0,2)
+user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-topology.ini b/charms/trusty/contrail-analytics/files/contrail-topology.ini
new file mode 100644
index 0000000..bce6a0b
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/contrail-topology.ini
@@ -0,0 +1,13 @@
+[program:contrail-topology]
+command=/usr/bin/contrail-topology --conf_file /etc/contrail/contrail-topology.conf --conf_file /etc/contrail/contrail-keystone-auth.conf
+priority=340
+autostart=true
+killasgroup=true
+stopsignal=KILL
+stdout_capture_maxbytes=1MB
+redirect_stderr=true
+stdout_logfile=/var/log/contrail/contrail-topology-stdout.log
+stderr_logfile=/var/log/contrail/contrail-topology-stderr.log
+startsecs=5
+exitcodes=0 ; 'expected' exit codes for process (default 0,2)
+user=contrail
diff --git a/charms/trusty/contrail-analytics/files/ntpq-nodemgr b/charms/trusty/contrail-analytics/files/ntpq-nodemgr
new file mode 100755
index 0000000..da00247
--- /dev/null
+++ b/charms/trusty/contrail-analytics/files/ntpq-nodemgr
@@ -0,0 +1,10 @@
+#!/bin/sh
+#
+# Script to produce some dummy output to satisfy contrail-nodemgr ntp status
+# Note: This is intended to be deployed inside containers where the host is running ntp
+
+if [ -x /usr/bin/ntpq ]; then
+ exec /usr/bin/ntpq "$@"
+fi
+
+echo "*"
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken b/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed b/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed b/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py
new file mode 100644
index 0000000..f72e7f8
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+ import six # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+ import six # flake8: noqa
+
+try:
+ import yaml # flake8: noqa
+except ImportError:
+ if sys.version_info.major == 2:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+ else:
+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+ import yaml # flake8: noqa
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py
new file mode 100644
index 0000000..7f3b66b
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py
@@ -0,0 +1,456 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import re
+import subprocess
+import six
+import socket
+
+from functools import partial
+
+from charmhelpers.core.hookenv import unit_get
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ log,
+ WARNING,
+)
+
+try:
+ import netifaces
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-netifaces', fatal=True)
+ import netifaces
+
+try:
+ import netaddr
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-netaddr', fatal=True)
+ import netaddr
+
+
+def _validate_cidr(network):
+ try:
+ netaddr.IPNetwork(network)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Network (%s) is not in CIDR presentation format" %
+ network)
+
+
+def no_ip_found_error_out(network):
+ errmsg = ("No IP address found in network: %s" % network)
+ raise ValueError(errmsg)
+
+
+def get_address_in_network(network, fallback=None, fatal=False):
+ """Get an IPv4 or IPv6 address within the network from the host.
+
+ :param network (str): CIDR presentation format. For example,
+ '192.168.1.0/24'.
+ :param fallback (str): If no address is found, return fallback.
+ :param fatal (boolean): If no address is found, fallback is not
+ set and fatal is True then exit(1).
+ """
+ if network is None:
+ if fallback is not None:
+ return fallback
+
+ if fatal:
+ no_ip_found_error_out(network)
+ else:
+ return None
+
+ _validate_cidr(network)
+ network = netaddr.IPNetwork(network)
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ if network.version == 4 and netifaces.AF_INET in addresses:
+ addr = addresses[netifaces.AF_INET][0]['addr']
+ netmask = addresses[netifaces.AF_INET][0]['netmask']
+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+ if cidr in network:
+ return str(cidr.ip)
+
+ if network.version == 6 and netifaces.AF_INET6 in addresses:
+ for addr in addresses[netifaces.AF_INET6]:
+ if not addr['addr'].startswith('fe80'):
+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+ addr['netmask']))
+ if cidr in network:
+ return str(cidr.ip)
+
+ if fallback is not None:
+ return fallback
+
+ if fatal:
+ no_ip_found_error_out(network)
+
+ return None
+
+
+def is_ipv6(address):
+ """Determine whether provided address is IPv6 or not."""
+ try:
+ address = netaddr.IPAddress(address)
+ except netaddr.AddrFormatError:
+ # probably a hostname - so not an address at all!
+ return False
+
+ return address.version == 6
+
+
+def is_address_in_network(network, address):
+ """
+ Determine whether the provided address is within a network range.
+
+ :param network (str): CIDR presentation format. For example,
+ '192.168.1.0/24'.
+ :param address: An individual IPv4 or IPv6 address without a net
+ mask or subnet prefix. For example, '192.168.1.1'.
+ :returns boolean: Flag indicating whether address is in network.
+ """
+ try:
+ network = netaddr.IPNetwork(network)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Network (%s) is not in CIDR presentation format" %
+ network)
+
+ try:
+ address = netaddr.IPAddress(address)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Address (%s) is not in correct presentation format" %
+ address)
+
+ if address in network:
+ return True
+ else:
+ return False
+
+
+def _get_for_address(address, key):
+ """Retrieve an attribute of or the physical interface that
+ the IP address provided could be bound to.
+
+ :param address (str): An individual IPv4 or IPv6 address without a net
+ mask or subnet prefix. For example, '192.168.1.1'.
+ :param key: 'iface' for the physical interface name or an attribute
+ of the configured interface, for example 'netmask'.
+ :returns str: Requested attribute or None if address is not bindable.
+ """
+ address = netaddr.IPAddress(address)
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ if address.version == 4 and netifaces.AF_INET in addresses:
+ addr = addresses[netifaces.AF_INET][0]['addr']
+ netmask = addresses[netifaces.AF_INET][0]['netmask']
+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+ cidr = network.cidr
+ if address in cidr:
+ if key == 'iface':
+ return iface
+ else:
+ return addresses[netifaces.AF_INET][0][key]
+
+ if address.version == 6 and netifaces.AF_INET6 in addresses:
+ for addr in addresses[netifaces.AF_INET6]:
+ if not addr['addr'].startswith('fe80'):
+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+ addr['netmask']))
+ cidr = network.cidr
+ if address in cidr:
+ if key == 'iface':
+ return iface
+ elif key == 'netmask' and cidr:
+ return str(cidr).split('/')[1]
+ else:
+ return addr[key]
+
+ return None
+
+
+get_iface_for_address = partial(_get_for_address, key='iface')
+
+
+get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def format_ipv6_addr(address):
+ """If address is IPv6, wrap it in '[]' otherwise return None.
+
+ This is required by most configuration files when specifying IPv6
+ addresses.
+ """
+ if is_ipv6(address):
+ return "[%s]" % address
+
+ return None
+
+
+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
+ fatal=True, exc_list=None):
+ """Return the assigned IP address for a given interface, if any."""
+ # Extract nic if passed /dev/ethX
+ if '/' in iface:
+ iface = iface.split('/')[-1]
+
+ if not exc_list:
+ exc_list = []
+
+ try:
+ inet_num = getattr(netifaces, inet_type)
+ except AttributeError:
+ raise Exception("Unknown inet type '%s'" % str(inet_type))
+
+ interfaces = netifaces.interfaces()
+ if inc_aliases:
+ ifaces = []
+ for _iface in interfaces:
+ if iface == _iface or _iface.split(':')[0] == iface:
+ ifaces.append(_iface)
+
+ if fatal and not ifaces:
+ raise Exception("Invalid interface '%s'" % iface)
+
+ ifaces.sort()
+ else:
+ if iface not in interfaces:
+ if fatal:
+ raise Exception("Interface '%s' not found " % (iface))
+ else:
+ return []
+
+ else:
+ ifaces = [iface]
+
+ addresses = []
+ for netiface in ifaces:
+ net_info = netifaces.ifaddresses(netiface)
+ if inet_num in net_info:
+ for entry in net_info[inet_num]:
+ if 'addr' in entry and entry['addr'] not in exc_list:
+ addresses.append(entry['addr'])
+
+ if fatal and not addresses:
+ raise Exception("Interface '%s' doesn't have any %s addresses." %
+ (iface, inet_type))
+
+ return sorted(addresses)
+
+
+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
+
+
+def get_iface_from_addr(addr):
+ """Work out on which interface the provided address is configured."""
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ for inet_type in addresses:
+ for _addr in addresses[inet_type]:
+ _addr = _addr['addr']
+ # link local
+ ll_key = re.compile("(.+)%.*")
+ raw = re.match(ll_key, _addr)
+ if raw:
+ _addr = raw.group(1)
+
+ if _addr == addr:
+ log("Address '%s' is configured on iface '%s'" %
+ (addr, iface))
+ return iface
+
+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
+ raise Exception(msg)
+
+
+def sniff_iface(f):
+ """Ensure decorated function is called with a value for iface.
+
+ If no iface provided, inject net iface inferred from unit private address.
+ """
+ def iface_sniffer(*args, **kwargs):
+ if not kwargs.get('iface', None):
+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
+
+ return f(*args, **kwargs)
+
+ return iface_sniffer
+
+
+@sniff_iface
+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
+ dynamic_only=True):
+ """Get assigned IPv6 address for a given interface.
+
+ Returns list of addresses found. If no address found, returns empty list.
+
+ If iface is None, we infer the current primary interface by doing a reverse
+ lookup on the unit private-address.
+
+ We currently only support scope global IPv6 addresses i.e. non-temporary
+ addresses. If no global IPv6 address is found, return the first one found
+ in the ipv6 address list.
+ """
+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
+ inc_aliases=inc_aliases, fatal=fatal,
+ exc_list=exc_list)
+
+ if addresses:
+ global_addrs = []
+ for addr in addresses:
+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
+ m = re.match(key_scope_link_local, addr)
+ if m:
+ eui_64_mac = m.group(1)
+ iface = m.group(2)
+ else:
+ global_addrs.append(addr)
+
+ if global_addrs:
+ # Make sure any found global addresses are not temporary
+ cmd = ['ip', 'addr', 'show', iface]
+ out = subprocess.check_output(cmd).decode('UTF-8')
+ if dynamic_only:
+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
+ else:
+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
+
+ addrs = []
+ for line in out.split('\n'):
+ line = line.strip()
+ m = re.match(key, line)
+ if m and 'temporary' not in line:
+ # Return the first valid address we find
+ for addr in global_addrs:
+ if m.group(1) == addr:
+ if not dynamic_only or \
+ m.group(1).endswith(eui_64_mac):
+ addrs.append(addr)
+
+ if addrs:
+ return addrs
+
+ if fatal:
+ raise Exception("Interface '%s' does not have a scope global "
+ "non-temporary ipv6 address." % iface)
+
+ return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+ """Return a list of bridges on the system."""
+ b_regex = "%s/*/bridge" % vnic_dir
+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+ """Return a list of nics comprising a given bridge on the system."""
+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+ """Check if a given nic is a member of a bridge."""
+ for bridge in get_bridges():
+ if nic in get_bridge_nics(bridge):
+ return True
+
+ return False
+
+
+def is_ip(address):
+ """
+    Returns True if address is a valid IPv4 address.
+ """
+ try:
+ # Test to see if already an IPv4 address
+ socket.inet_aton(address)
+ return True
+ except socket.error:
+ return False
+
+
+def ns_query(address):
+ try:
+ import dns.resolver
+ except ImportError:
+ apt_install('python-dnspython')
+ import dns.resolver
+
+ if isinstance(address, dns.name.Name):
+ rtype = 'PTR'
+ elif isinstance(address, six.string_types):
+ rtype = 'A'
+ else:
+ return None
+
+ answers = dns.resolver.query(address, rtype)
+ if answers:
+ return str(answers[0])
+ return None
+
+
+def get_host_ip(hostname, fallback=None):
+ """
+ Resolves the IP for a given hostname, or returns
+ the input if it is already an IP.
+ """
+ if is_ip(hostname):
+ return hostname
+
+ ip_addr = ns_query(hostname)
+ if not ip_addr:
+ try:
+ ip_addr = socket.gethostbyname(hostname)
+ except:
+ log("Failed to resolve hostname '%s'" % (hostname),
+ level=WARNING)
+ return fallback
+ return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+ """
+ Resolves hostname for given IP, or returns the input
+ if it is already a hostname.
+ """
+ if is_ip(address):
+ try:
+ import dns.reversename
+ except ImportError:
+ apt_install("python-dnspython")
+ import dns.reversename
+
+ rev = dns.reversename.from_address(address)
+ result = ns_query(rev)
+
+ if not result:
+ try:
+ result = socket.gethostbyaddr(address)[0]
+ except:
+ return None
+ else:
+ result = address
+
+ if fqdn:
+ # strip trailing .
+ if result.endswith('.'):
+ return result[:-1]
+ else:
+ return result
+ else:
+ return result.split('.')[0]
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py
new file mode 100644
index 0000000..77e2db7
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py
@@ -0,0 +1,96 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+''' Helpers for interacting with OpenvSwitch '''
+import subprocess
+import os
+from charmhelpers.core.hookenv import (
+ log, WARNING
+)
+from charmhelpers.core.host import (
+ service
+)
+
+
+def add_bridge(name):
+ ''' Add the named bridge to openvswitch '''
+ log('Creating bridge {}'.format(name))
+ subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
+
+
+def del_bridge(name):
+ ''' Delete the named bridge from openvswitch '''
+ log('Deleting bridge {}'.format(name))
+ subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
+
+
+def add_bridge_port(name, port, promisc=False):
+ ''' Add a port to the named openvswitch bridge '''
+ log('Adding port {} to bridge {}'.format(port, name))
+ subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
+ name, port])
+ subprocess.check_call(["ip", "link", "set", port, "up"])
+ if promisc:
+ subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
+ else:
+ subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
+
+
+def del_bridge_port(name, port):
+ ''' Delete a port from the named openvswitch bridge '''
+ log('Deleting port {} from bridge {}'.format(port, name))
+ subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
+ name, port])
+ subprocess.check_call(["ip", "link", "set", port, "down"])
+ subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
+
+
+def set_manager(manager):
+ ''' Set the controller for the local openvswitch '''
+ log('Setting manager for local ovs to {}'.format(manager))
+ subprocess.check_call(['ovs-vsctl', 'set-manager',
+ 'ssl:{}'.format(manager)])
+
+
+CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
+
+
+def get_certificate():
+ ''' Read openvswitch certificate from disk '''
+ if os.path.exists(CERT_PATH):
+ log('Reading ovs certificate from {}'.format(CERT_PATH))
+ with open(CERT_PATH, 'r') as cert:
+ full_cert = cert.read()
+ begin_marker = "-----BEGIN CERTIFICATE-----"
+ end_marker = "-----END CERTIFICATE-----"
+ begin_index = full_cert.find(begin_marker)
+ end_index = full_cert.rfind(end_marker)
+ if end_index == -1 or begin_index == -1:
+ raise RuntimeError("Certificate does not contain valid begin"
+ " and end markers.")
+ full_cert = full_cert[begin_index:(end_index + len(end_marker))]
+ return full_cert
+ else:
+ log('Certificate not found', level=WARNING)
+ return None
+
+
+def full_restart():
+ ''' Full restart and reload of openvswitch '''
+ if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
+ service('start', 'openvswitch-force-reload-kmod')
+ else:
+ service('force-reload-kmod', 'openvswitch-switch')
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py
new file mode 100644
index 0000000..b65d963
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py
@@ -0,0 +1,318 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module contains helpers to add and remove ufw rules.
+
+Examples:
+
+- open SSH port for subnet 10.0.3.0/24:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
+
+- open service by name as defined in /etc/services:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.service('ssh', 'open')
+
+- close service by port number:
+
+ >>> from charmhelpers.contrib.network import ufw
+ >>> ufw.enable()
+ >>> ufw.service('4949', 'close') # munin
+"""
+import re
+import os
+import subprocess
+
+from charmhelpers.core import hookenv
+from charmhelpers.core.kernel import modprobe, is_module_loaded
+
+__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
+
+
+class UFWError(Exception):
+ pass
+
+
+class UFWIPv6Error(UFWError):
+ pass
+
+
+def is_enabled():
+ """
+ Check if `ufw` is enabled
+
+ :returns: True if ufw is enabled
+ """
+ output = subprocess.check_output(['ufw', 'status'],
+ universal_newlines=True,
+ env={'LANG': 'en_US',
+ 'PATH': os.environ['PATH']})
+
+ m = re.findall(r'^Status: active\n', output, re.M)
+
+ return len(m) >= 1
+
+
+def is_ipv6_ok(soft_fail=False):
+ """
+ Check if IPv6 support is present and ip6tables functional
+
+ :param soft_fail: If set to True and IPv6 support is broken, then reports
+ that the host doesn't have IPv6 support, otherwise a
+ UFWIPv6Error exception is raised.
+ :returns: True if IPv6 is working, False otherwise
+ """
+
+ # do we have IPv6 in the machine?
+ if os.path.isdir('/proc/sys/net/ipv6'):
+ # is ip6tables kernel module loaded?
+ if not is_module_loaded('ip6_tables'):
+ # ip6tables support isn't complete, let's try to load it
+ try:
+ modprobe('ip6_tables')
+ # great, we can load the module
+ return True
+ except subprocess.CalledProcessError as ex:
+ hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
+ level="WARN")
+ # we are in a world where ip6tables isn't working
+ if soft_fail:
+ # so we inform that the machine doesn't have IPv6
+ return False
+ else:
+ raise UFWIPv6Error("IPv6 firewall support broken")
+ else:
+ # the module is present :)
+ return True
+
+ else:
+ # the system doesn't have IPv6
+ return False
+
+
+def disable_ipv6():
+ """
+ Disable ufw IPv6 support in /etc/default/ufw
+ """
+ exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
+ '/etc/default/ufw'])
+ if exit_code == 0:
+ hookenv.log('IPv6 support in ufw disabled', level='INFO')
+ else:
+ hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
+ raise UFWError("Couldn't disable IPv6 support in ufw")
+
+
+def enable(soft_fail=False):
+ """
+ Enable ufw
+
+ :param soft_fail: If set to True silently disables IPv6 support in ufw,
+                      otherwise a UFWIPv6Error exception is raised when IPv6
+ support is broken.
+ :returns: True if ufw is successfully enabled
+ """
+ if is_enabled():
+ return True
+
+ if not is_ipv6_ok(soft_fail):
+ disable_ipv6()
+
+ output = subprocess.check_output(['ufw', 'enable'],
+ universal_newlines=True,
+ env={'LANG': 'en_US',
+ 'PATH': os.environ['PATH']})
+
+ m = re.findall('^Firewall is active and enabled on system startup\n',
+ output, re.M)
+ hookenv.log(output, level='DEBUG')
+
+ if len(m) == 0:
+ hookenv.log("ufw couldn't be enabled", level='WARN')
+ return False
+ else:
+ hookenv.log("ufw enabled", level='INFO')
+ return True
+
+
+def disable():
+ """
+ Disable ufw
+
+ :returns: True if ufw is successfully disabled
+ """
+ if not is_enabled():
+ return True
+
+ output = subprocess.check_output(['ufw', 'disable'],
+ universal_newlines=True,
+ env={'LANG': 'en_US',
+ 'PATH': os.environ['PATH']})
+
+ m = re.findall(r'^Firewall stopped and disabled on system startup\n',
+ output, re.M)
+ hookenv.log(output, level='DEBUG')
+
+ if len(m) == 0:
+ hookenv.log("ufw couldn't be disabled", level='WARN')
+ return False
+ else:
+ hookenv.log("ufw disabled", level='INFO')
+ return True
+
+
+def default_policy(policy='deny', direction='incoming'):
+ """
+ Changes the default policy for traffic `direction`
+
+ :param policy: allow, deny or reject
+ :param direction: traffic direction, possible values: incoming, outgoing,
+ routed
+ """
+ if policy not in ['allow', 'deny', 'reject']:
+ raise UFWError(('Unknown policy %s, valid values: '
+ 'allow, deny, reject') % policy)
+
+ if direction not in ['incoming', 'outgoing', 'routed']:
+ raise UFWError(('Unknown direction %s, valid values: '
+ 'incoming, outgoing, routed') % direction)
+
+ output = subprocess.check_output(['ufw', 'default', policy, direction],
+ universal_newlines=True,
+ env={'LANG': 'en_US',
+ 'PATH': os.environ['PATH']})
+ hookenv.log(output, level='DEBUG')
+
+ m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
+ policy),
+ output, re.M)
+ if len(m) == 0:
+ hookenv.log("ufw couldn't change the default policy to %s for %s"
+ % (policy, direction), level='WARN')
+ return False
+ else:
+ hookenv.log("ufw default policy for %s changed to %s"
+ % (direction, policy), level='INFO')
+ return True
+
+
+def modify_access(src, dst='any', port=None, proto=None, action='allow',
+ index=None):
+ """
+ Grant access to an address or subnet
+
+ :param src: address (e.g. 192.168.1.234) or subnet
+ (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                this field has to be set.
+    :param port: destination port
+ :param proto: protocol (tcp or udp)
+ :param action: `allow` or `delete`
+ :param index: if different from None the rule is inserted at the given
+ `index`.
+ """
+ if not is_enabled():
+ hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
+ return
+
+ if action == 'delete':
+ cmd = ['ufw', 'delete', 'allow']
+ elif index is not None:
+ cmd = ['ufw', 'insert', str(index), action]
+ else:
+ cmd = ['ufw', action]
+
+ if src is not None:
+ cmd += ['from', src]
+
+ if dst is not None:
+ cmd += ['to', dst]
+
+ if port is not None:
+ cmd += ['port', str(port)]
+
+ if proto is not None:
+ cmd += ['proto', proto]
+
+ hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ (stdout, stderr) = p.communicate()
+
+ hookenv.log(stdout, level='INFO')
+
+ if p.returncode != 0:
+ hookenv.log(stderr, level='ERROR')
+ hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
+ p.returncode),
+ level='ERROR')
+
+
+def grant_access(src, dst='any', port=None, proto=None, index=None):
+ """
+ Grant access to an address or subnet
+
+ :param src: address (e.g. 192.168.1.234) or subnet
+ (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                this field has to be set.
+    :param port: destination port
+ :param proto: protocol (tcp or udp)
+ :param index: if different from None the rule is inserted at the given
+ `index`.
+ """
+ return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
+ index=index)
+
+
+def revoke_access(src, dst='any', port=None, proto=None):
+ """
+ Revoke access to an address or subnet
+
+ :param src: address (e.g. 192.168.1.234) or subnet
+ (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and only connections to one of them should be accepted,
+                this field has to be set.
+    :param port: destination port
+ :param proto: protocol (tcp or udp)
+ """
+ return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
+
+
+def service(name, action):
+ """
+ Open/close access to a service
+
+ :param name: could be a service name defined in `/etc/services` or a port
+ number.
+ :param action: `open` or `close`
+ """
+ if action == 'open':
+ subprocess.check_output(['ufw', 'allow', str(name)],
+ universal_newlines=True)
+ elif action == 'close':
+ subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
+ universal_newlines=True)
+ else:
+        raise UFWError(("'{}' not supported, use 'open' "
+                        "or 'close'").format(action))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 0000000..ef77caf
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,33 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+''' Helper for managing alternatives for file conflict resolution '''
+
+import subprocess
+import shutil
+import os
+
+
+def install_alternative(name, target, source, priority=50):
+ ''' Install alternative configuration '''
+ if (os.path.exists(target) and not os.path.islink(target)):
+ # Move existing file/directory away before installing
+ shutil.move(target, '{}.bak'.format(target))
+ cmd = [
+ 'update-alternatives', '--force', '--install',
+ target, name, source, str(priority)
+ ]
+ subprocess.check_call(cmd)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
new file mode 100644
index 0000000..722bc64
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -0,0 +1,197 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+from collections import OrderedDict
+from charmhelpers.contrib.amulet.deployment import (
+ AmuletDeployment
+)
+
+
+class OpenStackAmuletDeployment(AmuletDeployment):
+ """OpenStack amulet deployment.
+
+ This class inherits from AmuletDeployment and has additional support
+ that is specifically for use by OpenStack charms.
+ """
+
+ def __init__(self, series=None, openstack=None, source=None, stable=True):
+ """Initialize the deployment environment."""
+ super(OpenStackAmuletDeployment, self).__init__(series)
+ self.openstack = openstack
+ self.source = source
+ self.stable = stable
+ # Note(coreycb): this needs to be changed when new next branches come
+ # out.
+ self.current_next = "trusty"
+
+ def _determine_branch_locations(self, other_services):
+ """Determine the branch locations for the other services.
+
+ Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+ stable or next branches for the other_services."""
+
+ # Charms outside the lp:~openstack-charmers namespace
+ base_charms = ['mysql', 'mongodb', 'nrpe']
+
+ # Force these charms to current series even when using an older series.
+ # ie. Use trusty/nrpe even when series is precise, as the P charm
+ # does not possess the necessary external master config and hooks.
+ force_series_current = ['nrpe']
+
+ if self.series in ['precise', 'trusty']:
+ base_series = self.series
+ else:
+ base_series = self.current_next
+
+ for svc in other_services:
+ if svc['name'] in force_series_current:
+ base_series = self.current_next
+ # If a location has been explicitly set, use it
+ if svc.get('location'):
+ continue
+ if self.stable:
+ temp = 'lp:charms/{}/{}'
+ svc['location'] = temp.format(base_series,
+ svc['name'])
+ else:
+ if svc['name'] in base_charms:
+ temp = 'lp:charms/{}/{}'
+ svc['location'] = temp.format(base_series,
+ svc['name'])
+ else:
+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
+ svc['location'] = temp.format(self.current_next,
+ svc['name'])
+
+ return other_services
+
+ def _add_services(self, this_service, other_services):
+ """Add services to the deployment and set openstack-origin/source."""
+ other_services = self._determine_branch_locations(other_services)
+
+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
+ other_services)
+
+ services = other_services
+ services.append(this_service)
+
+ # Charms which should use the source config option
+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+ 'ceph-osd', 'ceph-radosgw']
+
+ # Charms which can not use openstack-origin, ie. many subordinates
+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
+
+ if self.openstack:
+ for svc in services:
+ if svc['name'] not in use_source + no_origin:
+ config = {'openstack-origin': self.openstack}
+ self.d.configure(svc['name'], config)
+
+ if self.source:
+ for svc in services:
+ if svc['name'] in use_source and svc['name'] not in no_origin:
+ config = {'source': self.source}
+ self.d.configure(svc['name'], config)
+
+ def _configure_services(self, configs):
+ """Configure all of the services."""
+ for service, config in six.iteritems(configs):
+ self.d.configure(service, config)
+
+ def _get_openstack_release(self):
+ """Get openstack release.
+
+ Return an integer representing the enum value of the openstack
+ release.
+ """
+ # Must be ordered by OpenStack release (not by Ubuntu release):
+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+ self.precise_havana, self.precise_icehouse,
+ self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
+ self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
+ self.wily_liberty) = range(12)
+
+ releases = {
+ ('precise', None): self.precise_essex,
+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
+ ('precise', 'cloud:precise-havana'): self.precise_havana,
+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
+ ('trusty', None): self.trusty_icehouse,
+ ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
+ ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
+ ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+ ('utopic', None): self.utopic_juno,
+ ('vivid', None): self.vivid_kilo,
+ ('wily', None): self.wily_liberty}
+ return releases[(self.series, self.openstack)]
+
+ def _get_openstack_release_string(self):
+ """Get openstack release string.
+
+ Return a string representing the openstack release.
+ """
+ releases = OrderedDict([
+ ('precise', 'essex'),
+ ('quantal', 'folsom'),
+ ('raring', 'grizzly'),
+ ('saucy', 'havana'),
+ ('trusty', 'icehouse'),
+ ('utopic', 'juno'),
+ ('vivid', 'kilo'),
+ ('wily', 'liberty'),
+ ])
+ if self.openstack:
+ os_origin = self.openstack.split(':')[1]
+ return os_origin.split('%s-' % self.series)[1].split('/')[0]
+ else:
+ return releases[self.series]
+
+ def get_ceph_expected_pools(self, radosgw=False):
+ """Return a list of expected ceph pools in a ceph + cinder + glance
+ test scenario, based on OpenStack release and whether ceph radosgw
+ is flagged as present or not."""
+
+ if self._get_openstack_release() >= self.trusty_kilo:
+ # Kilo or later
+ pools = [
+ 'rbd',
+ 'cinder',
+ 'glance'
+ ]
+ else:
+ # Juno or earlier
+ pools = [
+ 'data',
+ 'metadata',
+ 'rbd',
+ 'cinder',
+ 'glance'
+ ]
+
+ if radosgw:
+ pools.extend([
+ '.rgw.root',
+ '.rgw.control',
+ '.rgw',
+ '.rgw.gc',
+ '.users.uid'
+ ])
+
+ return pools
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py
new file mode 100644
index 0000000..b139741
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -0,0 +1,963 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import amulet
+import json
+import logging
+import os
+import six
+import time
+import urllib
+
+import cinderclient.v1.client as cinder_client
+import glanceclient.v1.client as glance_client
+import heatclient.v1.client as heat_client
+import keystoneclient.v2_0 as keystone_client
+import novaclient.v1_1.client as nova_client
+import pika
+import swiftclient
+
+from charmhelpers.contrib.amulet.utils import (
+ AmuletUtils
+)
+
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
+
+class OpenStackAmuletUtils(AmuletUtils):
+ """OpenStack amulet utilities.
+
+ This class inherits from AmuletUtils and has additional support
+ that is specifically for use by OpenStack charm tests.
+ """
+
+ def __init__(self, log_level=ERROR):
+ """Initialize the deployment environment."""
+ super(OpenStackAmuletUtils, self).__init__(log_level)
+
+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
+ public_port, expected):
+ """Validate endpoint data.
+
+ Validate actual endpoint data vs expected endpoint data. The ports
+ are used to find the matching endpoint.
+ """
+ self.log.debug('Validating endpoint data...')
+ self.log.debug('actual: {}'.format(repr(endpoints)))
+ found = False
+ for ep in endpoints:
+ self.log.debug('endpoint: {}'.format(repr(ep)))
+ if (admin_port in ep.adminurl and
+ internal_port in ep.internalurl and
+ public_port in ep.publicurl):
+ found = True
+ actual = {'id': ep.id,
+ 'region': ep.region,
+ 'adminurl': ep.adminurl,
+ 'internalurl': ep.internalurl,
+ 'publicurl': ep.publicurl,
+ 'service_id': ep.service_id}
+ ret = self._validate_dict_data(expected, actual)
+ if ret:
+ return 'unexpected endpoint data - {}'.format(ret)
+
+ if not found:
+ return 'endpoint not found'
+
+ def validate_svc_catalog_endpoint_data(self, expected, actual):
+ """Validate service catalog endpoint data.
+
+ Validate a list of actual service catalog endpoints vs a list of
+ expected service catalog endpoints.
+ """
+ self.log.debug('Validating service catalog endpoint data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for k, v in six.iteritems(expected):
+ if k in actual:
+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
+ if ret:
+ return self.endpoint_error(k, ret)
+ else:
+ return "endpoint {} does not exist".format(k)
+ return ret
+
+ def validate_tenant_data(self, expected, actual):
+ """Validate tenant data.
+
+ Validate a list of actual tenant data vs list of expected tenant
+ data.
+ """
+ self.log.debug('Validating tenant data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'enabled': act.enabled, 'description': act.description,
+ 'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected tenant data - {}".format(ret)
+ if not found:
+ return "tenant {} does not exist".format(e['name'])
+ return ret
+
+ def validate_role_data(self, expected, actual):
+ """Validate role data.
+
+ Validate a list of actual role data vs a list of expected role
+ data.
+ """
+ self.log.debug('Validating role data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'name': act.name, 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected role data - {}".format(ret)
+ if not found:
+ return "role {} does not exist".format(e['name'])
+ return ret
+
+ def validate_user_data(self, expected, actual):
+ """Validate user data.
+
+ Validate a list of actual user data vs a list of expected user
+ data.
+ """
+ self.log.debug('Validating user data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ for e in expected:
+ found = False
+ for act in actual:
+ a = {'enabled': act.enabled, 'name': act.name,
+ 'email': act.email, 'tenantId': act.tenantId,
+ 'id': act.id}
+ if e['name'] == a['name']:
+ found = True
+ ret = self._validate_dict_data(e, a)
+ if ret:
+ return "unexpected user data - {}".format(ret)
+ if not found:
+ return "user {} does not exist".format(e['name'])
+ return ret
+
+ def validate_flavor_data(self, expected, actual):
+ """Validate flavor data.
+
+ Validate a list of actual flavors vs a list of expected flavors.
+ """
+ self.log.debug('Validating flavor data...')
+ self.log.debug('actual: {}'.format(repr(actual)))
+ act = [a.name for a in actual]
+ return self._validate_list_data(expected, act)
+
+ def tenant_exists(self, keystone, tenant):
+ """Return True if tenant exists."""
+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
+ return tenant in [t.name for t in keystone.tenants.list()]
+
+ def authenticate_cinder_admin(self, keystone_sentry, username,
+ password, tenant):
+ """Authenticates admin user with cinder."""
+ # NOTE(beisner): cinder python client doesn't accept tokens.
+ service_ip = \
+ keystone_sentry.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
+ return cinder_client.Client(username, password, tenant, ept)
+
+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
+ tenant):
+ """Authenticates admin user with the keystone admin endpoint."""
+ self.log.debug('Authenticating keystone admin...')
+ unit = keystone_sentry
+ service_ip = unit.relation('shared-db',
+ 'mysql:shared-db')['private-address']
+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+
+ def authenticate_keystone_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with the keystone public endpoint."""
+ self.log.debug('Authenticating keystone user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return keystone_client.Client(username=user, password=password,
+ tenant_name=tenant, auth_url=ep)
+
+ def authenticate_glance_admin(self, keystone):
+ """Authenticates admin user with glance."""
+ self.log.debug('Authenticating glance admin...')
+ ep = keystone.service_catalog.url_for(service_type='image',
+ endpoint_type='adminURL')
+ return glance_client.Client(ep, token=keystone.auth_token)
+
+ def authenticate_heat_admin(self, keystone):
+ """Authenticates the admin user with heat."""
+ self.log.debug('Authenticating heat admin...')
+ ep = keystone.service_catalog.url_for(service_type='orchestration',
+ endpoint_type='publicURL')
+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
+
+ def authenticate_nova_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with nova-api."""
+ self.log.debug('Authenticating nova user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return nova_client.Client(username=user, api_key=password,
+ project_id=tenant, auth_url=ep)
+
+ def authenticate_swift_user(self, keystone, user, password, tenant):
+ """Authenticates a regular user with swift api."""
+ self.log.debug('Authenticating swift user ({})...'.format(user))
+ ep = keystone.service_catalog.url_for(service_type='identity',
+ endpoint_type='publicURL')
+ return swiftclient.Connection(authurl=ep,
+ user=user,
+ key=password,
+ tenant_name=tenant,
+ auth_version='2.0')
+
+ def create_cirros_image(self, glance, image_name):
+ """Download the latest cirros image and upload it to glance,
+ validate and return a resource pointer.
+
+ :param glance: pointer to authenticated glance connection
+ :param image_name: display name for new image
+ :returns: glance image pointer
+ """
+ self.log.debug('Creating glance cirros image '
+ '({})...'.format(image_name))
+
+ # Download cirros image
+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+ if http_proxy:
+ proxies = {'http': http_proxy}
+ opener = urllib.FancyURLopener(proxies)
+ else:
+ opener = urllib.FancyURLopener()
+
+ f = opener.open('http://download.cirros-cloud.net/version/released')
+ version = f.read().strip()
+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+ local_path = os.path.join('tests', cirros_img)
+
+ if not os.path.exists(local_path):
+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+ version, cirros_img)
+ opener.retrieve(cirros_url, local_path)
+ f.close()
+
+ # Create glance image
+ with open(local_path) as f:
+ image = glance.images.create(name=image_name, is_public=True,
+ disk_format='qcow2',
+ container_format='bare', data=f)
+
+ # Wait for image to reach active status
+ img_id = image.id
+ ret = self.resource_reaches_status(glance.images, img_id,
+ expected_stat='active',
+ msg='Image status wait')
+ if not ret:
+ msg = 'Glance image failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new image
+ self.log.debug('Validating image attributes...')
+ val_img_name = glance.images.get(img_id).name
+ val_img_stat = glance.images.get(img_id).status
+ val_img_pub = glance.images.get(img_id).is_public
+ val_img_cfmt = glance.images.get(img_id).container_format
+ val_img_dfmt = glance.images.get(img_id).disk_format
+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+ 'container fmt:{} disk fmt:{}'.format(
+ val_img_name, val_img_pub, img_id,
+ val_img_stat, val_img_cfmt, val_img_dfmt))
+
+ if val_img_name == image_name and val_img_stat == 'active' \
+ and val_img_pub is True and val_img_cfmt == 'bare' \
+ and val_img_dfmt == 'qcow2':
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return image
+
+ def delete_image(self, glance, image):
+ """Delete the specified image."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_image.')
+ self.log.debug('Deleting glance image ({})...'.format(image))
+ return self.delete_resource(glance.images, image, msg='glance image')
+
+ def create_instance(self, nova, image_name, instance_name, flavor):
+ """Create the specified instance."""
+ self.log.debug('Creating instance '
+ '({}|{}|{})'.format(instance_name, image_name, flavor))
+ image = nova.images.find(name=image_name)
+ flavor = nova.flavors.find(name=flavor)
+ instance = nova.servers.create(name=instance_name, image=image,
+ flavor=flavor)
+
+ count = 1
+ status = instance.status
+ while status != 'ACTIVE' and count < 60:
+ time.sleep(3)
+ instance = nova.servers.get(instance.id)
+ status = instance.status
+ self.log.debug('instance status: {}'.format(status))
+ count += 1
+
+ if status != 'ACTIVE':
+ self.log.error('instance creation timed out')
+ return None
+
+ return instance
+
+ def delete_instance(self, nova, instance):
+ """Delete the specified instance."""
+
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'delete_resource instead of delete_instance.')
+ self.log.debug('Deleting instance ({})...'.format(instance))
+ return self.delete_resource(nova.servers, instance,
+ msg='nova instance')
+
+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
+ """Create a new keypair, or return pointer if it already exists."""
+ try:
+ _keypair = nova.keypairs.get(keypair_name)
+ self.log.debug('Keypair ({}) already exists, '
+ 'using it.'.format(keypair_name))
+ return _keypair
+ except:
+ self.log.debug('Keypair ({}) does not exist, '
+ 'creating it.'.format(keypair_name))
+
+ _keypair = nova.keypairs.create(name=keypair_name)
+ return _keypair
+
+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+ img_id=None, src_vol_id=None, snap_id=None):
+ """Create cinder volume, optionally from a glance image, OR
+ optionally as a clone of an existing volume, OR optionally
+ from a snapshot. Wait for the new volume status to reach
+ the expected status, validate and return a resource pointer.
+
+ :param vol_name: cinder volume display name
+ :param vol_size: size in gigabytes
+ :param img_id: optional glance image id
+ :param src_vol_id: optional source volume id to clone
+ :param snap_id: optional snapshot id to use
+ :returns: cinder volume pointer
+ """
+ # Handle parameter input and avoid impossible combinations
+ if img_id and not src_vol_id and not snap_id:
+ # Create volume from image
+ self.log.debug('Creating cinder volume from glance image...')
+ bootable = 'true'
+ elif src_vol_id and not img_id and not snap_id:
+ # Clone an existing volume
+ self.log.debug('Cloning cinder volume...')
+ bootable = cinder.volumes.get(src_vol_id).bootable
+ elif snap_id and not src_vol_id and not img_id:
+ # Create volume from snapshot
+ self.log.debug('Creating cinder volume from snapshot...')
+ snap = cinder.volume_snapshots.find(id=snap_id)
+ vol_size = snap.size
+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+ bootable = cinder.volumes.get(snap_vol_id).bootable
+ elif not img_id and not src_vol_id and not snap_id:
+ # Create volume
+ self.log.debug('Creating cinder volume...')
+ bootable = 'false'
+ else:
+ # Impossible combination of parameters
+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+ img_id, src_vol_id,
+ snap_id))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Create new volume
+ try:
+ vol_new = cinder.volumes.create(display_name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
+ except Exception as e:
+ msg = 'Failed to create volume: {}'.format(e)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Wait for volume to reach available status
+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
+ expected_stat="available",
+ msg="Volume status wait")
+ if not ret:
+ msg = 'Cinder volume failed to reach expected state.'
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Re-validate new volume
+ self.log.debug('Validating volume attributes...')
+ val_vol_name = cinder.volumes.get(vol_id).display_name
+ val_vol_boot = cinder.volumes.get(vol_id).bootable
+ val_vol_stat = cinder.volumes.get(vol_id).status
+ val_vol_size = cinder.volumes.get(vol_id).size
+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+ '{} size:{}'.format(val_vol_name, vol_id,
+ val_vol_stat, val_vol_boot,
+ val_vol_size))
+
+ if val_vol_boot == bootable and val_vol_stat == 'available' \
+ and val_vol_name == vol_name and val_vol_size == vol_size:
+ self.log.debug(msg_attr)
+ else:
+ msg = ('Volume validation failed, {}'.format(msg_attr))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ return vol_new
+
+ def delete_resource(self, resource, resource_id,
+ msg="resource", max_wait=120):
+ """Delete one openstack resource, such as one instance, keypair,
+ image, volume, stack, etc., and confirm deletion within max wait time.
+
+ :param resource: pointer to os resource type, ex:glance_client.images
+ :param resource_id: unique name or id for the openstack resource
+ :param msg: text to identify purpose in logging
+ :param max_wait: maximum wait time in seconds
+ :returns: True if successful, otherwise False
+ """
+ self.log.debug('Deleting OpenStack resource '
+ '{} ({})'.format(resource_id, msg))
+ num_before = len(list(resource.list()))
+ resource.delete(resource_id)
+
+ tries = 0
+ num_after = len(list(resource.list()))
+ while num_after != (num_before - 1) and tries < (max_wait / 4):
+ self.log.debug('{} delete check: '
+ '{} [{}:{}] {}'.format(msg, tries,
+ num_before,
+ num_after,
+ resource_id))
+ time.sleep(4)
+ num_after = len(list(resource.list()))
+ tries += 1
+
+ self.log.debug('{}: expected, actual count = {}, '
+ '{}'.format(msg, num_before - 1, num_after))
+
+ if num_after == (num_before - 1):
+ return True
+ else:
+ self.log.error('{} delete timed out'.format(msg))
+ return False
+
+ def resource_reaches_status(self, resource, resource_id,
+ expected_stat='available',
+ msg='resource', max_wait=120):
+ """Wait for an openstack resources status to reach an
+ expected status within a specified time. Useful to confirm that
+ nova instances, cinder vols, snapshots, glance images, heat stacks
+ and other resources eventually reach the expected status.
+
+ :param resource: pointer to os resource type, ex: heat_client.stacks
+ :param resource_id: unique id for the openstack resource
+ :param expected_stat: status to expect resource to reach
+ :param msg: text to identify purpose in logging
+ :param max_wait: maximum wait time in seconds
+ :returns: True if successful, False if status is not reached
+ """
+
+ tries = 0
+ resource_stat = resource.get(resource_id).status
+ while resource_stat != expected_stat and tries < (max_wait / 4):
+ self.log.debug('{} status check: '
+ '{} [{}:{}] {}'.format(msg, tries,
+ resource_stat,
+ expected_stat,
+ resource_id))
+ time.sleep(4)
+ resource_stat = resource.get(resource_id).status
+ tries += 1
+
+ self.log.debug('{}: expected, actual status = {}, '
+ '{}'.format(msg, resource_stat, expected_stat))
+
+ if resource_stat == expected_stat:
+ return True
+ else:
+ self.log.debug('{} never reached expected status: '
+ '{}'.format(resource_id, expected_stat))
+ return False
+
+ def get_ceph_osd_id_cmd(self, index):
+ """Produce a shell command that will return a ceph-osd id."""
+ return ("`initctl list | grep 'ceph-osd ' | "
+ "awk 'NR=={} {{ print $2 }}' | "
+ "grep -o '[0-9]*'`".format(index + 1))
+
+ def get_ceph_pools(self, sentry_unit):
+ """Return a dict of ceph pools from a single ceph unit, with
+ pool name as keys, pool id as vals."""
+ pools = {}
+ cmd = 'sudo ceph osd lspools'
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+ for pool in str(output).split(','):
+ pool_id_name = pool.split(' ')
+ if len(pool_id_name) == 2:
+ pool_id = pool_id_name[0]
+ pool_name = pool_id_name[1]
+ pools[pool_name] = int(pool_id)
+
+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+ pools))
+ return pools
+
+ def get_ceph_df(self, sentry_unit):
+ """Return dict of ceph df json output, including ceph pool state.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :returns: Dict of ceph df output
+ """
+ cmd = 'sudo ceph df --format=json'
+ output, code = sentry_unit.run(cmd)
+ if code != 0:
+ msg = ('{} `{}` returned {} '
+ '{}'.format(sentry_unit.info['unit_name'],
+ cmd, code, output))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ return json.loads(output)
+
+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+ """Take a sample of attributes of a ceph pool, returning ceph
+ pool name, object count and disk space used for the specified
+ pool ID number.
+
+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+ :param pool_id: Ceph pool ID
+ :returns: List of pool name, object count, kb disk space used
+ """
+ df = self.get_ceph_df(sentry_unit)
+ pool_name = df['pools'][pool_id]['name']
+ obj_count = df['pools'][pool_id]['stats']['objects']
+ kb_used = df['pools'][pool_id]['stats']['kb_used']
+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
+ '{} kb used'.format(pool_name, pool_id,
+ obj_count, kb_used))
+ return pool_name, obj_count, kb_used
+
+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+ """Validate ceph pool samples taken over time, such as pool
+ object counts or pool kb used, before adding, after adding, and
+ after deleting items which affect those pool attributes. The
+ 2nd element is expected to be greater than the 1st; 3rd is expected
+ to be less than the 2nd.
+
+ :param samples: List containing 3 data samples
+ :param sample_type: String for logging and usage context
+ :returns: None if successful, Failure message otherwise
+ """
+ original, created, deleted = range(3)
+ if samples[created] <= samples[original] or \
+ samples[deleted] >= samples[created]:
+ return ('Ceph {} samples ({}) '
+ 'unexpected.'.format(sample_type, samples))
+ else:
+ self.log.debug('Ceph {} samples (OK): '
+ '{}'.format(sample_type, samples))
+ return None
+
+# rabbitmq/amqp specific helpers:
+ def add_rmq_test_user(self, sentry_units,
+ username="testuser1", password="changeme"):
+ """Add a test user via the first rmq juju unit, check connection as
+ the new user against all sentry units.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Adding rmq user ({})...'.format(username))
+
+ # Check that user does not already exist
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+ if username in output:
+ self.log.warning('User ({}) already exists, returning '
+ 'gracefully.'.format(username))
+ return
+
+ perms = '".*" ".*" ".*"'
+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+ # Add user via first unit
+ for cmd in cmds:
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+ # Check connection against the other sentry_units
+ self.log.debug('Checking user connect against units...')
+ for sentry_unit in sentry_units:
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+ username=username,
+ password=password)
+ connection.close()
+
+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+ """Delete a rabbitmq user via the first rmq juju unit.
+
+ :param sentry_units: list of sentry unit pointers
+ :param username: amqp user name, default to testuser1
+
+ :returns: None if successful or no such user.
+ """
+ self.log.debug('Deleting rmq user ({})...'.format(username))
+
+ # Check that the user exists
+ cmd_user_list = 'rabbitmqctl list_users'
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+ if username not in output:
+ self.log.warning('User ({}) does not exist, returning '
+ 'gracefully.'.format(username))
+ return
+
+ # Delete the user
+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+ def get_rmq_cluster_status(self, sentry_unit):
+ """Execute rabbitmq cluster status command on a unit and return
+ the full output.
+
+ :param sentry_unit: sentry unit pointer
+ :returns: String containing console output of cluster status command
+ """
+ cmd = 'rabbitmqctl cluster_status'
+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
+ self.log.debug('{} cluster_status:\n{}'.format(
+ sentry_unit.info['unit_name'], output))
+ return str(output)
+
+ def get_rmq_cluster_running_nodes(self, sentry_unit):
+ """Parse rabbitmqctl cluster_status output string, return list of
+ running rabbitmq cluster nodes.
+
+ :param sentry_unit: sentry unit pointer
+ :returns: List containing node names of running nodes
+ """
+ # NOTE(beisner): rabbitmqctl cluster_status output is not
+ # json-parsable, do string chop foo, then json.loads that.
+ str_stat = self.get_rmq_cluster_status(sentry_unit)
+ if 'running_nodes' in str_stat:
+ pos_start = str_stat.find("{running_nodes,") + 15
+ pos_end = str_stat.find("]},", pos_start) + 1
+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+ run_nodes = json.loads(str_run_nodes)
+ return run_nodes
+ else:
+ return []
+
+ def validate_rmq_cluster_running_nodes(self, sentry_units):
+ """Check that all rmq unit hostnames are represented in the
+ cluster_status output of all units.
+
+ :param sentry_units: list of sentry unit pointers (all rmq units);
+ host names are derived from the units themselves
+ :returns: None if successful, otherwise return error message
+ """
+ host_names = self.get_unit_hostnames(sentry_units)
+ errors = []
+
+ # Query every unit for cluster_status running nodes
+ for query_unit in sentry_units:
+ query_unit_name = query_unit.info['unit_name']
+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+ # Confirm that every unit is represented in the queried unit's
+ # cluster_status running nodes output.
+ for validate_unit in sentry_units:
+ val_host_name = host_names[validate_unit.info['unit_name']]
+ val_node_name = 'rabbit@{}'.format(val_host_name)
+
+ if val_node_name not in running_nodes:
+ errors.append('Cluster member check failed on {}: {} not '
+ 'in {}\n'.format(query_unit_name,
+ val_node_name,
+ running_nodes))
+ if errors:
+ return ''.join(errors)
+
+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+ """Check a single juju rmq unit for ssl and port in the config file."""
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ conf_file = '/etc/rabbitmq/rabbitmq.config'
+ conf_contents = str(self.file_contents_safe(sentry_unit,
+ conf_file, max_wait=16))
+ # Checks
+ conf_ssl = 'ssl' in conf_contents
+ conf_port = str(port) in conf_contents
+
+ # Port explicitly checked in config
+ if port and conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif port and not conf_port and conf_ssl:
+ self.log.debug('SSL is enabled @{} but not on port {} '
+ '({})'.format(host, port, unit_name))
+ return False
+ # Port not checked (useful when checking that ssl is disabled)
+ elif not port and conf_ssl:
+ self.log.debug('SSL is enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return True
+ elif not port and not conf_ssl:
+ self.log.debug('SSL not enabled @{}:{} '
+ '({})'.format(host, port, unit_name))
+ return False
+ else:
+ msg = ('Unknown condition when checking SSL status @{}:{} '
+ '({})'.format(host, port, unit_name))
+ amulet.raise_status(amulet.FAIL, msg)
+
+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+ """Check that ssl is enabled on rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :param port: optional ssl port override to validate
+ :returns: None if successful, otherwise return error message
+ """
+ for sentry_unit in sentry_units:
+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+ return ('Unexpected condition: ssl is disabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def validate_rmq_ssl_disabled_units(self, sentry_units):
+ """Check that ssl is enabled on listed rmq juju sentry units.
+
+ :param sentry_units: list of all rmq sentry units
+ :returns: None if successful, otherwise return error message.
+ """
+ for sentry_unit in sentry_units:
+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+ return ('Unexpected condition: ssl is enabled on unit '
+ '({})'.format(sentry_unit.info['unit_name']))
+ return None
+
+ def configure_rmq_ssl_on(self, sentry_units, deployment,
+ port=None, max_wait=60):
+ """Turn ssl charm config option on, with optional non-default
+ ssl port specification. Confirm that it is enabled on every
+ unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param port: amqp port, use defaults if None
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: on')
+
+ # Enable RMQ SSL
+ config = {'ssl': 'on'}
+ if port:
+ config['ssl_port'] = port
+
+ deployment.configure('rabbitmq-server', config)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+ """Turn ssl charm config option off, confirm that it is disabled
+ on every unit.
+
+ :param sentry_units: list of sentry units
+ :param deployment: amulet deployment object pointer
+ :param max_wait: maximum time to wait in seconds to confirm
+ :returns: None if successful. Raise on error.
+ """
+ self.log.debug('Setting ssl charm config option: off')
+
+ # Disable RMQ SSL
+ config = {'ssl': 'off'}
+ deployment.configure('rabbitmq-server', config)
+
+ # Confirm
+ tries = 0
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ while ret and tries < (max_wait / 4):
+ time.sleep(4)
+ self.log.debug('Attempt {}: {}'.format(tries, ret))
+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+ tries += 1
+
+ if ret:
+ amulet.raise_status(amulet.FAIL, ret)
+
+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+ port=None, fatal=True,
+ username="testuser1", password="changeme"):
+ """Establish and return a pika amqp connection to the rabbitmq service
+ running on a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :param fatal: boolean, default to True (raises on connect error)
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :returns: pika amqp connection pointer or None if failed and non-fatal
+ """
+ host = sentry_unit.info['public-address']
+ unit_name = sentry_unit.info['unit_name']
+
+ # Default port logic if port is not specified
+ if ssl and not port:
+ port = 5671
+ elif not ssl and not port:
+ port = 5672
+
+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
+ '{}...'.format(host, port, unit_name, username))
+
+ try:
+ credentials = pika.PlainCredentials(username, password)
+ parameters = pika.ConnectionParameters(host=host, port=port,
+ credentials=credentials,
+ ssl=ssl,
+ connection_attempts=3,
+ retry_delay=5,
+ socket_timeout=1)
+ connection = pika.BlockingConnection(parameters)
+ assert connection.server_properties['product'] == 'RabbitMQ'
+ self.log.debug('Connect OK')
+ return connection
+ except Exception as e:
+ msg = ('amqp connection failed to {}:{} as '
+ '{} ({})'.format(host, port, username, str(e)))
+ if fatal:
+ amulet.raise_status(amulet.FAIL, msg)
+ else:
+ self.log.warn(msg)
+ return None
+
+ def publish_amqp_message_by_unit(self, sentry_unit, message,
+ queue="test", ssl=False,
+ username="testuser1",
+ password="changeme",
+ port=None):
+ """Publish an amqp message to a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param message: amqp message string
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: None. Raises exception if publish failed.
+ """
+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+ message))
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+
+ # NOTE(beisner): extra debug here re: pika hang potential:
+ # https://github.com/pika/pika/issues/297
+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+ self.log.debug('Defining channel...')
+ channel = connection.channel()
+ self.log.debug('Declaring queue...')
+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+ self.log.debug('Publishing message...')
+ channel.basic_publish(exchange='', routing_key=queue, body=message)
+ self.log.debug('Closing channel...')
+ channel.close()
+ self.log.debug('Closing connection...')
+ connection.close()
+
+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+ username="testuser1",
+ password="changeme",
+ ssl=False, port=None):
+ """Get an amqp message from a rmq juju unit.
+
+ :param sentry_unit: sentry unit pointer
+ :param queue: message queue, default to test
+ :param username: amqp user name, default to testuser1
+ :param password: amqp user password
+ :param ssl: boolean, default to False
+ :param port: amqp port, use defaults if None
+ :returns: amqp message body as string. Raise if get fails.
+ """
+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+ port=port,
+ username=username,
+ password=password)
+ channel = connection.channel()
+ method_frame, _, body = channel.basic_get(queue)
+
+ if method_frame:
+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
+ body))
+ channel.basic_ack(method_frame.delivery_tag)
+ channel.close()
+ connection.close()
+ return body
+ else:
+ msg = 'No message retrieved.'
+ amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py
new file mode 100644
index 0000000..1248d49
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py
@@ -0,0 +1,1416 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import os
+import re
+import time
+from base64 import b64decode
+from subprocess import check_call
+
+import six
+import yaml
+
+from charmhelpers.fetch import (
+ apt_install,
+ filter_installed_packages,
+)
+from charmhelpers.core.hookenv import (
+ config,
+ is_relation_made,
+ local_unit,
+ log,
+ relation_get,
+ relation_ids,
+ related_units,
+ relation_set,
+ unit_get,
+ unit_private_ip,
+ charm_name,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+
+from charmhelpers.core.sysctl import create as sysctl_create
+from charmhelpers.core.strutils import bool_from_string
+
+from charmhelpers.core.host import (
+ get_bond_master,
+ is_phy_iface,
+ list_nics,
+ get_nic_hwaddr,
+ mkdir,
+ write_file,
+)
+from charmhelpers.contrib.hahelpers.cluster import (
+ determine_apache_port,
+ determine_api_port,
+ https,
+ is_clustered,
+)
+from charmhelpers.contrib.hahelpers.apache import (
+ get_cert,
+ get_ca_cert,
+ install_ca_cert,
+)
+from charmhelpers.contrib.openstack.neutron import (
+ neutron_plugin_attribute,
+ parse_data_port_mappings,
+)
+from charmhelpers.contrib.openstack.ip import (
+ resolve_address,
+ INTERNAL,
+)
+from charmhelpers.contrib.network.ip import (
+ get_address_in_network,
+ get_ipv4_addr,
+ get_ipv6_addr,
+ get_netmask_for_address,
+ format_ipv6_addr,
+ is_address_in_network,
+ is_bridge_member,
+)
+from charmhelpers.contrib.openstack.utils import get_host_ip
+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
+ADDRESS_TYPES = ['admin', 'internal', 'public']
+
+
+class OSContextError(Exception):
+ pass
+
+
+def ensure_packages(packages):
+ """Install but do not upgrade required plugin packages."""
+ required = filter_installed_packages(packages)
+ if required:
+ apt_install(required, fatal=True)
+
+
+def context_complete(ctxt):
+ _missing = []
+ for k, v in six.iteritems(ctxt):
+ if v is None or v == '':
+ _missing.append(k)
+
+ if _missing:
+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
+ return False
+
+ return True
+
+
+def config_flags_parser(config_flags):
+ """Parses config flags string into dict.
+
+ This parsing method supports a few different formats for the config
+ flag values to be parsed:
+
+ 1. A string in the simple format of key=value pairs, with the possibility
+ of specifying multiple key value pairs within the same string. For
+ example, a string in the format of 'key1=value1, key2=value2' will
+ return a dict of:
+
+ {'key1': 'value1',
+ 'key2': 'value2'}.
+
+ 2. A string in the above format, but supporting a comma-delimited list
+ of values for the same key. For example, a string in the format of
+       'key1=value1, key2=value3,value4,value5' will return a dict of:
+
+       {'key1': 'value1',
+       'key2': 'value3,value4,value5'}
+
+ 3. A string containing a colon character (:) prior to an equal
+ character (=) will be treated as yaml and parsed as such. This can be
+ used to specify more complex key value pairs. For example,
+ a string in the format of 'key1: subkey1=value1, subkey2=value2' will
+ return a dict of:
+
+       {'key1': 'subkey1=value1, subkey2=value2'}
+
+ The provided config_flags string may be a list of comma-separated values
+ which themselves may be comma-separated list of values.
+ """
+ # If we find a colon before an equals sign then treat it as yaml.
+ # Note: limit it to finding the colon first since this indicates assignment
+ # for inline yaml.
+ colon = config_flags.find(':')
+ equals = config_flags.find('=')
+ if colon > 0:
+ if colon < equals or equals < 0:
+ return yaml.safe_load(config_flags)
+
+ if config_flags.find('==') >= 0:
+ log("config_flags is not in expected format (key=value)", level=ERROR)
+ raise OSContextError
+
+ # strip the following from each value.
+ post_strippers = ' ,'
+ # we strip any leading/trailing '=' or ' ' from the string then
+ # split on '='.
+ split = config_flags.strip(' =').split('=')
+ limit = len(split)
+ flags = {}
+ for i in range(0, limit - 1):
+ current = split[i]
+ next = split[i + 1]
+ vindex = next.rfind(',')
+ if (i == limit - 2) or (vindex < 0):
+ value = next
+ else:
+ value = next[:vindex]
+
+ if i == 0:
+ key = current
+ else:
+            # if this is not the first entry, expect an embedded key.
+ index = current.rfind(',')
+ if index < 0:
+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
+ raise OSContextError
+ key = current[index + 1:]
+
+ # Add to collection.
+ flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
+
+ return flags
+
+
+class OSContextGenerator(object):
+ """Base class for all context generators."""
+ interfaces = []
+ related = False
+ complete = False
+ missing_data = []
+
+ def __call__(self):
+ raise NotImplementedError
+
+ def context_complete(self, ctxt):
+ """Check for missing data for the required context data.
+ Set self.missing_data if it exists and return False.
+ Set self.complete if no missing data and return True.
+ """
+ # Fresh start
+ self.complete = False
+ self.missing_data = []
+ for k, v in six.iteritems(ctxt):
+ if v is None or v == '':
+ if k not in self.missing_data:
+ self.missing_data.append(k)
+
+ if self.missing_data:
+ self.complete = False
+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
+ else:
+ self.complete = True
+ return self.complete
+
+ def get_related(self):
+ """Check if any of the context interfaces have relation ids.
+ Set self.related and return True if one of the interfaces
+ has relation ids.
+ """
+ # Fresh start
+ self.related = False
+ try:
+ for interface in self.interfaces:
+ if relation_ids(interface):
+ self.related = True
+ return self.related
+ except AttributeError as e:
+ log("{} {}"
+ "".format(self, e), 'INFO')
+ return self.related
+
+
+class SharedDBContext(OSContextGenerator):
+ interfaces = ['shared-db']
+
+ def __init__(self,
+ database=None, user=None, relation_prefix=None, ssl_dir=None):
+ """Allows inspecting relation for settings prefixed with
+ relation_prefix. This is useful for parsing access for multiple
+ databases returned via the shared-db interface (eg, nova_password,
+ quantum_password)
+ """
+ self.relation_prefix = relation_prefix
+ self.database = database
+ self.user = user
+ self.ssl_dir = ssl_dir
+ self.rel_name = self.interfaces[0]
+
+ def __call__(self):
+ self.database = self.database or config('database')
+ self.user = self.user or config('database-user')
+ if None in [self.database, self.user]:
+ log("Could not generate shared_db context. Missing required charm "
+ "config options. (database name and user)", level=ERROR)
+ raise OSContextError
+
+ ctxt = {}
+
+ # NOTE(jamespage) if mysql charm provides a network upon which
+ # access to the database should be made, reconfigure relation
+ # with the service units local address and defer execution
+ access_network = relation_get('access-network')
+ if access_network is not None:
+ if self.relation_prefix is not None:
+ hostname_key = "{}_hostname".format(self.relation_prefix)
+ else:
+ hostname_key = "hostname"
+ access_hostname = get_address_in_network(access_network,
+ unit_get('private-address'))
+ set_hostname = relation_get(attribute=hostname_key,
+ unit=local_unit())
+ if set_hostname != access_hostname:
+ relation_set(relation_settings={hostname_key: access_hostname})
+ return None # Defer any further hook execution for now....
+
+ password_setting = 'password'
+ if self.relation_prefix:
+ password_setting = self.relation_prefix + '_password'
+
+ for rid in relation_ids(self.interfaces[0]):
+ self.related = True
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ host = rdata.get('db_host')
+ host = format_ipv6_addr(host) or host
+ ctxt = {
+ 'database_host': host,
+ 'database': self.database,
+ 'database_user': self.user,
+ 'database_password': rdata.get(password_setting),
+ 'database_type': 'mysql'
+ }
+ if self.context_complete(ctxt):
+ db_ssl(rdata, ctxt, self.ssl_dir)
+ return ctxt
+ return {}
+
+
+class PostgresqlDBContext(OSContextGenerator):
+ interfaces = ['pgsql-db']
+
+ def __init__(self, database=None):
+ self.database = database
+
+ def __call__(self):
+ self.database = self.database or config('database')
+ if self.database is None:
+ log('Could not generate postgresql_db context. Missing required '
+ 'charm config options. (database name)', level=ERROR)
+ raise OSContextError
+
+ ctxt = {}
+ for rid in relation_ids(self.interfaces[0]):
+ self.related = True
+ for unit in related_units(rid):
+ rel_host = relation_get('host', rid=rid, unit=unit)
+ rel_user = relation_get('user', rid=rid, unit=unit)
+ rel_passwd = relation_get('password', rid=rid, unit=unit)
+ ctxt = {'database_host': rel_host,
+ 'database': self.database,
+ 'database_user': rel_user,
+ 'database_password': rel_passwd,
+ 'database_type': 'postgresql'}
+ if self.context_complete(ctxt):
+ return ctxt
+
+ return {}
+
+
+def db_ssl(rdata, ctxt, ssl_dir):
+ if 'ssl_ca' in rdata and ssl_dir:
+ ca_path = os.path.join(ssl_dir, 'db-client.ca')
+ with open(ca_path, 'w') as fh:
+ fh.write(b64decode(rdata['ssl_ca']))
+
+ ctxt['database_ssl_ca'] = ca_path
+ elif 'ssl_ca' in rdata:
+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
+ return ctxt
+
+ if 'ssl_cert' in rdata:
+ cert_path = os.path.join(
+ ssl_dir, 'db-client.cert')
+ if not os.path.exists(cert_path):
+ log("Waiting 1m for ssl client cert validity", level=INFO)
+ time.sleep(60)
+
+ with open(cert_path, 'w') as fh:
+ fh.write(b64decode(rdata['ssl_cert']))
+
+ ctxt['database_ssl_cert'] = cert_path
+ key_path = os.path.join(ssl_dir, 'db-client.key')
+ with open(key_path, 'w') as fh:
+ fh.write(b64decode(rdata['ssl_key']))
+
+ ctxt['database_ssl_key'] = key_path
+
+ return ctxt
+
+
+class IdentityServiceContext(OSContextGenerator):
+
+ def __init__(self, service=None, service_user=None, rel_name='identity-service'):
+ self.service = service
+ self.service_user = service_user
+ self.rel_name = rel_name
+ self.interfaces = [self.rel_name]
+
+ def __call__(self):
+ log('Generating template context for ' + self.rel_name, level=DEBUG)
+ ctxt = {}
+
+ if self.service and self.service_user:
+ # This is required for pki token signing if we don't want /tmp to
+ # be used.
+ cachedir = '/var/cache/%s' % (self.service)
+ if not os.path.isdir(cachedir):
+ log("Creating service cache dir %s" % (cachedir), level=DEBUG)
+ mkdir(path=cachedir, owner=self.service_user,
+ group=self.service_user, perms=0o700)
+
+ ctxt['signing_dir'] = cachedir
+
+ for rid in relation_ids(self.rel_name):
+ self.related = True
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ serv_host = rdata.get('service_host')
+ serv_host = format_ipv6_addr(serv_host) or serv_host
+ auth_host = rdata.get('auth_host')
+ auth_host = format_ipv6_addr(auth_host) or auth_host
+ svc_protocol = rdata.get('service_protocol') or 'http'
+ auth_protocol = rdata.get('auth_protocol') or 'http'
+ ctxt.update({'service_port': rdata.get('service_port'),
+ 'service_host': serv_host,
+ 'auth_host': auth_host,
+ 'auth_port': rdata.get('auth_port'),
+ 'admin_tenant_name': rdata.get('service_tenant'),
+ 'admin_user': rdata.get('service_username'),
+ 'admin_password': rdata.get('service_password'),
+ 'service_protocol': svc_protocol,
+ 'auth_protocol': auth_protocol})
+
+ if self.context_complete(ctxt):
+ # NOTE(jamespage) this is required for >= icehouse
+ # so a missing value just indicates keystone needs
+ # upgrading
+ ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+ return ctxt
+
+ return {}
+
+
+class AMQPContext(OSContextGenerator):
+
+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+ self.ssl_dir = ssl_dir
+ self.rel_name = rel_name
+ self.relation_prefix = relation_prefix
+ self.interfaces = [rel_name]
+
+ def __call__(self):
+ log('Generating template context for amqp', level=DEBUG)
+ conf = config()
+ if self.relation_prefix:
+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
+ else:
+ user_setting = 'rabbit-user'
+ vhost_setting = 'rabbit-vhost'
+
+ try:
+ username = conf[user_setting]
+ vhost = conf[vhost_setting]
+ except KeyError as e:
+ log('Could not generate shared_db context. Missing required charm '
+ 'config options: %s.' % e, level=ERROR)
+ raise OSContextError
+
+ ctxt = {}
+ for rid in relation_ids(self.rel_name):
+ ha_vip_only = False
+ self.related = True
+ for unit in related_units(rid):
+ if relation_get('clustered', rid=rid, unit=unit):
+ ctxt['clustered'] = True
+ vip = relation_get('vip', rid=rid, unit=unit)
+ vip = format_ipv6_addr(vip) or vip
+ ctxt['rabbitmq_host'] = vip
+ else:
+ host = relation_get('private-address', rid=rid, unit=unit)
+ host = format_ipv6_addr(host) or host
+ ctxt['rabbitmq_host'] = host
+
+ ctxt.update({
+ 'rabbitmq_user': username,
+ 'rabbitmq_password': relation_get('password', rid=rid,
+ unit=unit),
+ 'rabbitmq_virtual_host': vhost,
+ })
+
+ ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
+ if ssl_port:
+ ctxt['rabbit_ssl_port'] = ssl_port
+
+ ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
+ if ssl_ca:
+ ctxt['rabbit_ssl_ca'] = ssl_ca
+
+ if relation_get('ha_queues', rid=rid, unit=unit) is not None:
+ ctxt['rabbitmq_ha_queues'] = True
+
+ ha_vip_only = relation_get('ha-vip-only',
+ rid=rid, unit=unit) is not None
+
+ if self.context_complete(ctxt):
+ if 'rabbit_ssl_ca' in ctxt:
+ if not self.ssl_dir:
+ log("Charm not setup for ssl support but ssl ca "
+ "found", level=INFO)
+ break
+
+ ca_path = os.path.join(
+ self.ssl_dir, 'rabbit-client-ca.pem')
+ with open(ca_path, 'w') as fh:
+ fh.write(b64decode(ctxt['rabbit_ssl_ca']))
+ ctxt['rabbit_ssl_ca'] = ca_path
+
+ # Sufficient information found = break out!
+ break
+
+ # Used for active/active rabbitmq >= grizzly
+ if (('clustered' not in ctxt or ha_vip_only) and
+ len(related_units(rid)) > 1):
+ rabbitmq_hosts = []
+ for unit in related_units(rid):
+ host = relation_get('private-address', rid=rid, unit=unit)
+ host = format_ipv6_addr(host) or host
+ rabbitmq_hosts.append(host)
+
+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
+
+ oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
+ if oslo_messaging_flags:
+ ctxt['oslo_messaging_flags'] = config_flags_parser(
+ oslo_messaging_flags)
+
+ if not self.complete:
+ return {}
+
+ return ctxt
+
+
+class CephContext(OSContextGenerator):
+ """Generates context for /etc/ceph/ceph.conf templates."""
+ interfaces = ['ceph']
+
+ def __call__(self):
+ if not relation_ids('ceph'):
+ return {}
+
+ log('Generating template context for ceph', level=DEBUG)
+ mon_hosts = []
+ ctxt = {
+ 'use_syslog': str(config('use-syslog')).lower()
+ }
+ for rid in relation_ids('ceph'):
+ for unit in related_units(rid):
+ if not ctxt.get('auth'):
+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
+ if not ctxt.get('key'):
+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
+ unit=unit)
+ unit_priv_addr = relation_get('private-address', rid=rid,
+ unit=unit)
+ ceph_addr = ceph_pub_addr or unit_priv_addr
+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
+ mon_hosts.append(ceph_addr)
+
+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
+
+ if not os.path.isdir('/etc/ceph'):
+ os.mkdir('/etc/ceph')
+
+ if not self.context_complete(ctxt):
+ return {}
+
+ ensure_packages(['ceph-common'])
+ return ctxt
+
+
+class HAProxyContext(OSContextGenerator):
+ """Provides half a context for the haproxy template, which describes
+ all peers to be included in the cluster. Each charm needs to include
+ its own context generator that describes the port mapping.
+ """
+ interfaces = ['cluster']
+
+ def __init__(self, singlenode_mode=False):
+ self.singlenode_mode = singlenode_mode
+
+ def __call__(self):
+ if not relation_ids('cluster') and not self.singlenode_mode:
+ return {}
+
+ if config('prefer-ipv6'):
+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+ else:
+ addr = get_host_ip(unit_get('private-address'))
+
+ l_unit = local_unit().replace('/', '-')
+ cluster_hosts = {}
+
+ # NOTE(jamespage): build out map of configured network endpoints
+ # and associated backends
+ for addr_type in ADDRESS_TYPES:
+ cfg_opt = 'os-{}-network'.format(addr_type)
+ laddr = get_address_in_network(config(cfg_opt))
+ if laddr:
+ netmask = get_netmask_for_address(laddr)
+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
+ netmask),
+ 'backends': {l_unit: laddr}}
+ for rid in relation_ids('cluster'):
+ for unit in related_units(rid):
+ _laddr = relation_get('{}-address'.format(addr_type),
+ rid=rid, unit=unit)
+ if _laddr:
+ _unit = unit.replace('/', '-')
+ cluster_hosts[laddr]['backends'][_unit] = _laddr
+
+        # NOTE(jamespage) add backend based on private address - this
+        # will either be the only backend or the fallback if no acls
+        # match in the frontend
+ cluster_hosts[addr] = {}
+ netmask = get_netmask_for_address(addr)
+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
+ 'backends': {l_unit: addr}}
+ for rid in relation_ids('cluster'):
+ for unit in related_units(rid):
+ _laddr = relation_get('private-address',
+ rid=rid, unit=unit)
+ if _laddr:
+ _unit = unit.replace('/', '-')
+ cluster_hosts[addr]['backends'][_unit] = _laddr
+
+ ctxt = {
+ 'frontends': cluster_hosts,
+ 'default_backend': addr
+ }
+
+ if config('haproxy-server-timeout'):
+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
+
+ if config('haproxy-client-timeout'):
+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
+
+ if config('prefer-ipv6'):
+ ctxt['ipv6'] = True
+ ctxt['local_host'] = 'ip6-localhost'
+ ctxt['haproxy_host'] = '::'
+ ctxt['stat_port'] = ':::8888'
+ else:
+ ctxt['local_host'] = '127.0.0.1'
+ ctxt['haproxy_host'] = '0.0.0.0'
+ ctxt['stat_port'] = ':8888'
+
+ for frontend in cluster_hosts:
+ if (len(cluster_hosts[frontend]['backends']) > 1 or
+ self.singlenode_mode):
+ # Enable haproxy when we have enough peers.
+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
+ level=DEBUG)
+ with open('/etc/default/haproxy', 'w') as out:
+ out.write('ENABLED=1\n')
+
+ return ctxt
+
+ log('HAProxy context is incomplete, this unit has no peers.',
+ level=INFO)
+ return {}
+
+
+class ImageServiceContext(OSContextGenerator):
+ interfaces = ['image-service']
+
+ def __call__(self):
+ """Obtains the glance API server from the image-service relation.
+ Useful in nova and cinder (currently).
+ """
+ log('Generating template context for image-service.', level=DEBUG)
+ rids = relation_ids('image-service')
+ if not rids:
+ return {}
+
+ for rid in rids:
+ for unit in related_units(rid):
+ api_server = relation_get('glance-api-server',
+ rid=rid, unit=unit)
+ if api_server:
+ return {'glance_api_servers': api_server}
+
+ log("ImageService context is incomplete. Missing required relation "
+ "data.", level=INFO)
+ return {}
+
+
+class ApacheSSLContext(OSContextGenerator):
+ """Generates a context for an apache vhost configuration that configures
+ HTTPS reverse proxying for one or many endpoints. Generated context
+ looks something like::
+
+ {
+ 'namespace': 'cinder',
+ 'private_address': 'iscsi.mycinderhost.com',
+ 'endpoints': [(8776, 8766), (8777, 8767)]
+ }
+
+    The endpoints list consists of tuples mapping external ports
+    to internal ports.
+ """
+ interfaces = ['https']
+
+ # charms should inherit this context and set external ports
+ # and service namespace accordingly.
+ external_ports = []
+ service_namespace = None
+
+ def enable_modules(self):
+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
+ check_call(cmd)
+
+ def configure_cert(self, cn=None):
+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
+ mkdir(path=ssl_dir)
+ cert, key = get_cert(cn)
+ if cn:
+ cert_filename = 'cert_{}'.format(cn)
+ key_filename = 'key_{}'.format(cn)
+ else:
+ cert_filename = 'cert'
+ key_filename = 'key'
+
+ write_file(path=os.path.join(ssl_dir, cert_filename),
+ content=b64decode(cert))
+ write_file(path=os.path.join(ssl_dir, key_filename),
+ content=b64decode(key))
+
+ def configure_ca(self):
+ ca_cert = get_ca_cert()
+ if ca_cert:
+ install_ca_cert(b64decode(ca_cert))
+
+ def canonical_names(self):
+ """Figure out which canonical names clients will access this service.
+ """
+ cns = []
+ for r_id in relation_ids('identity-service'):
+ for unit in related_units(r_id):
+ rdata = relation_get(rid=r_id, unit=unit)
+ for k in rdata:
+ if k.startswith('ssl_key_'):
+ cns.append(k.lstrip('ssl_key_'))
+
+ return sorted(list(set(cns)))
+
+ def get_network_addresses(self):
+ """For each network configured, return corresponding address and vip
+ (if available).
+
+ Returns a list of tuples of the form:
+
+ [(address_in_net_a, vip_in_net_a),
+ (address_in_net_b, vip_in_net_b),
+ ...]
+
+ or, if no vip(s) available:
+
+ [(address_in_net_a, address_in_net_a),
+ (address_in_net_b, address_in_net_b),
+ ...]
+ """
+ addresses = []
+ if config('vip'):
+ vips = config('vip').split()
+ else:
+ vips = []
+
+ for net_type in ['os-internal-network', 'os-admin-network',
+ 'os-public-network']:
+ addr = get_address_in_network(config(net_type),
+ unit_get('private-address'))
+ if len(vips) > 1 and is_clustered():
+ if not config(net_type):
+ log("Multiple networks configured but net_type "
+ "is None (%s)." % net_type, level=WARNING)
+ continue
+
+ for vip in vips:
+ if is_address_in_network(config(net_type), vip):
+ addresses.append((addr, vip))
+ break
+
+ elif is_clustered() and config('vip'):
+ addresses.append((addr, config('vip')))
+ else:
+ addresses.append((addr, addr))
+
+ return sorted(addresses)
+
+ def __call__(self):
+ if isinstance(self.external_ports, six.string_types):
+ self.external_ports = [self.external_ports]
+
+ if not self.external_ports or not https():
+ return {}
+
+ self.configure_ca()
+ self.enable_modules()
+
+ ctxt = {'namespace': self.service_namespace,
+ 'endpoints': [],
+ 'ext_ports': []}
+
+ cns = self.canonical_names()
+ if cns:
+ for cn in cns:
+ self.configure_cert(cn)
+ else:
+ # Expect cert/key provided in config (currently assumed that ca
+ # uses ip for cn)
+ cn = resolve_address(endpoint_type=INTERNAL)
+ self.configure_cert(cn)
+
+ addresses = self.get_network_addresses()
+ for address, endpoint in sorted(set(addresses)):
+ for api_port in self.external_ports:
+ ext_port = determine_apache_port(api_port,
+ singlenode_mode=True)
+ int_port = determine_api_port(api_port, singlenode_mode=True)
+ portmap = (address, endpoint, int(ext_port), int(int_port))
+ ctxt['endpoints'].append(portmap)
+ ctxt['ext_ports'].append(int(ext_port))
+
+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
+ return ctxt
+
+
+class NeutronContext(OSContextGenerator):
+ interfaces = []
+
+ @property
+ def plugin(self):
+ return None
+
+ @property
+ def network_manager(self):
+ return None
+
+ @property
+ def packages(self):
+ return neutron_plugin_attribute(self.plugin, 'packages',
+ self.network_manager)
+
+ @property
+ def neutron_security_groups(self):
+ return None
+
+ def _ensure_packages(self):
+ for pkgs in self.packages:
+ ensure_packages(pkgs)
+
+ def _save_flag_file(self):
+ if self.network_manager == 'quantum':
+ _file = '/etc/nova/quantum_plugin.conf'
+ else:
+ _file = '/etc/nova/neutron_plugin.conf'
+
+ with open(_file, 'wb') as out:
+ out.write(self.plugin + '\n')
+
+ def ovs_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ ovs_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'ovs',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return ovs_ctxt
+
+ def nuage_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ nuage_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'vsp',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return nuage_ctxt
+
+ def nvp_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ nvp_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'nvp',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return nvp_ctxt
+
+ def n1kv_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ n1kv_user_config_flags = config('n1kv-config-flags')
+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
+ n1kv_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'n1kv',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': n1kv_config,
+ 'vsm_ip': config('n1kv-vsm-ip'),
+ 'vsm_username': config('n1kv-vsm-username'),
+ 'vsm_password': config('n1kv-vsm-password'),
+ 'restrict_policy_profiles': restrict_policy_profiles}
+
+ if n1kv_user_config_flags:
+ flags = config_flags_parser(n1kv_user_config_flags)
+ n1kv_ctxt['user_config_flags'] = flags
+
+ return n1kv_ctxt
+
+ def calico_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ calico_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'Calico',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+
+ return calico_ctxt
+
+ def neutron_ctxt(self):
+ if https():
+ proto = 'https'
+ else:
+ proto = 'http'
+
+ if is_clustered():
+ host = config('vip')
+ else:
+ host = unit_get('private-address')
+
+ ctxt = {'network_manager': self.network_manager,
+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
+ return ctxt
+
+ def pg_ctxt(self):
+ driver = neutron_plugin_attribute(self.plugin, 'driver',
+ self.network_manager)
+ config = neutron_plugin_attribute(self.plugin, 'config',
+ self.network_manager)
+ ovs_ctxt = {'core_plugin': driver,
+ 'neutron_plugin': 'plumgrid',
+ 'neutron_security_groups': self.neutron_security_groups,
+ 'local_ip': unit_private_ip(),
+ 'config': config}
+ return ovs_ctxt
+
+ def __call__(self):
+ if self.network_manager not in ['quantum', 'neutron']:
+ return {}
+
+ if not self.plugin:
+ return {}
+
+ ctxt = self.neutron_ctxt()
+
+ if self.plugin == 'ovs':
+ ctxt.update(self.ovs_ctxt())
+ elif self.plugin in ['nvp', 'nsx']:
+ ctxt.update(self.nvp_ctxt())
+ elif self.plugin == 'n1kv':
+ ctxt.update(self.n1kv_ctxt())
+ elif self.plugin == 'Calico':
+ ctxt.update(self.calico_ctxt())
+ elif self.plugin == 'vsp':
+ ctxt.update(self.nuage_ctxt())
+ elif self.plugin == 'plumgrid':
+ ctxt.update(self.pg_ctxt())
+
+ alchemy_flags = config('neutron-alchemy-flags')
+ if alchemy_flags:
+ flags = config_flags_parser(alchemy_flags)
+ ctxt['neutron_alchemy_flags'] = flags
+
+ self._save_flag_file()
+ return ctxt
+
+
+class NeutronPortContext(OSContextGenerator):
+
+ def resolve_ports(self, ports):
+ """Resolve NICs not yet bound to bridge(s)
+
+ If hwaddress provided then returns resolved hwaddress otherwise NIC.
+ """
+ if not ports:
+ return None
+
+ hwaddr_to_nic = {}
+ hwaddr_to_ip = {}
+ for nic in list_nics():
+ # Ignore virtual interfaces (bond masters will be identified from
+ # their slaves)
+ if not is_phy_iface(nic):
+ continue
+
+ _nic = get_bond_master(nic)
+ if _nic:
+ log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+ level=DEBUG)
+ nic = _nic
+
+ hwaddr = get_nic_hwaddr(nic)
+ hwaddr_to_nic[hwaddr] = nic
+ addresses = get_ipv4_addr(nic, fatal=False)
+ addresses += get_ipv6_addr(iface=nic, fatal=False)
+ hwaddr_to_ip[hwaddr] = addresses
+
+ resolved = []
+ mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
+ for entry in ports:
+ if re.match(mac_regex, entry):
+                # NIC is in known NICs and does NOT have an IP address
+ if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
+ # If the nic is part of a bridge then don't use it
+ if is_bridge_member(hwaddr_to_nic[entry]):
+ continue
+
+ # Entry is a MAC address for a valid interface that doesn't
+ # have an IP address assigned yet.
+ resolved.append(hwaddr_to_nic[entry])
+ else:
+ # If the passed entry is not a MAC address, assume it's a valid
+ # interface, and that the user put it there on purpose (we can
+ # trust it to be the real external network).
+ resolved.append(entry)
+
+ # Ensure no duplicates
+ return list(set(resolved))
+
+
+class OSConfigFlagContext(OSContextGenerator):
+ """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+ in the charm configuration and apply them at any point in
+ any file by using a template flag.
+
+ Sometimes users might want config flags inserted within a
+ specific section so this class allows users to specify the
+ template flag name, allowing for multiple template flags
+ (sections) within the same context.
+
+ NOTE: the value of config-flags may be a comma-separated list of
+ key=value pairs and some Openstack config files support
+ comma-separated lists as values.
+ """
+
+ def __init__(self, charm_flag='config-flags',
+ template_flag='user_config_flags'):
+ """
+ :param charm_flag: config flags in charm configuration.
+ :param template_flag: insert point for user-defined flags in template
+ file.
+ """
+ super(OSConfigFlagContext, self).__init__()
+ self._charm_flag = charm_flag
+ self._template_flag = template_flag
+
+ def __call__(self):
+ config_flags = config(self._charm_flag)
+ if not config_flags:
+ return {}
+
+ return {self._template_flag:
+ config_flags_parser(config_flags)}
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+ """
+ Responsible for inspecting relations to subordinates that
+ may be exporting required config via a json blob.
+
+    The subordinate interface allows subordinates to export their
+    configuration requirements to the principal for multiple config
+    files and multiple services. I.e., a subordinate that has interfaces
+    to both glance and nova may export the following yaml blob as json::
+
+ glance:
+ /etc/glance/glance-api.conf:
+ sections:
+ DEFAULT:
+ - [key1, value1]
+ /etc/glance/glance-registry.conf:
+ MYSECTION:
+ - [key2, value2]
+ nova:
+ /etc/nova/nova.conf:
+ sections:
+ DEFAULT:
+ - [key3, value3]
+
+
+    It is then up to the principal charms to subscribe this context to
+    the service+config file it is interested in. Configuration data will
+ be available in the template context, in glance's case, as::
+
+ ctxt = {
+ ... other context ...
+ 'subordinate_config': {
+ 'DEFAULT': {
+ 'key1': 'value1',
+ },
+ 'MYSECTION': {
+ 'key2': 'value2',
+ },
+ }
+ }
+ """
+
+ def __init__(self, service, config_file, interface):
+ """
+ :param service : Service name key to query in any subordinate
+ data found
+ :param config_file : Service's config file to query sections
+ :param interface : Subordinate interface to inspect
+ """
+ self.config_file = config_file
+ if isinstance(service, list):
+ self.services = service
+ else:
+ self.services = [service]
+ if isinstance(interface, list):
+ self.interfaces = interface
+ else:
+ self.interfaces = [interface]
+
+ def __call__(self):
+ ctxt = {'sections': {}}
+ rids = []
+ for interface in self.interfaces:
+ rids.extend(relation_ids(interface))
+ for rid in rids:
+ for unit in related_units(rid):
+ sub_config = relation_get('subordinate_configuration',
+ rid=rid, unit=unit)
+ if sub_config and sub_config != '':
+ try:
+ sub_config = json.loads(sub_config)
+ except:
+ log('Could not parse JSON from subordinate_config '
+ 'setting from %s' % rid, level=ERROR)
+ continue
+
+ for service in self.services:
+ if service not in sub_config:
+ log('Found subordinate_config on %s but it contained'
+ 'nothing for %s service' % (rid, service),
+ level=INFO)
+ continue
+
+ sub_config = sub_config[service]
+ if self.config_file not in sub_config:
+ log('Found subordinate_config on %s but it contained'
+ 'nothing for %s' % (rid, self.config_file),
+ level=INFO)
+ continue
+
+ sub_config = sub_config[self.config_file]
+ for k, v in six.iteritems(sub_config):
+ if k == 'sections':
+ for section, config_list in six.iteritems(v):
+ log("adding section '%s'" % (section),
+ level=DEBUG)
+ if ctxt[k].get(section):
+ ctxt[k][section].extend(config_list)
+ else:
+ ctxt[k][section] = config_list
+ else:
+ ctxt[k] = v
+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+ return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+ def __call__(self):
+ ctxt = {}
+ ctxt['debug'] = \
+ False if config('debug') is None else config('debug')
+ ctxt['verbose'] = \
+ False if config('verbose') is None else config('verbose')
+
+ return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+
+ def __call__(self):
+ ctxt = {'use_syslog': config('use-syslog')}
+ return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+ def __call__(self):
+ if config('prefer-ipv6'):
+ return {'bind_host': '::'}
+ else:
+ return {'bind_host': '0.0.0.0'}
+
+
+class WorkerConfigContext(OSContextGenerator):
+
+ @property
+ def num_cpus(self):
+ try:
+ from psutil import NUM_CPUS
+ except ImportError:
+ apt_install('python-psutil', fatal=True)
+ from psutil import NUM_CPUS
+
+ return NUM_CPUS
+
+ def __call__(self):
+ multiplier = config('worker-multiplier') or 0
+ ctxt = {"workers": self.num_cpus * multiplier}
+ return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+ interfaces = ['zeromq-configuration']
+
+ def __call__(self):
+ ctxt = {}
+ if is_relation_made('zeromq-configuration', 'host'):
+ for rid in relation_ids('zeromq-configuration'):
+ for unit in related_units(rid):
+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+ ctxt['zmq_host'] = relation_get('host', unit, rid)
+ ctxt['zmq_redis_address'] = relation_get(
+ 'zmq_redis_address', unit, rid)
+
+ return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+
+ def __init__(self, zmq_relation='zeromq-configuration',
+ amqp_relation='amqp'):
+ """
+ :param zmq_relation: Name of Zeromq relation to check
+ """
+ self.zmq_relation = zmq_relation
+ self.amqp_relation = amqp_relation
+
+ def __call__(self):
+ ctxt = {'notifications': 'False'}
+ if is_relation_made(self.amqp_relation):
+ ctxt['notifications'] = "True"
+
+ return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+ """This context check if the 'sysctl' option exists on configuration
+ then creates a file with the loaded contents"""
+ def __call__(self):
+ sysctl_dict = config('sysctl')
+ if sysctl_dict:
+ sysctl_create(sysctl_dict,
+ '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+ return {'sysctl': sysctl_dict}
+
+
+class NeutronAPIContext(OSContextGenerator):
+ '''
+ Inspects current neutron-plugin-api relation for neutron settings. Return
+ defaults if it is not present.
+ '''
+ interfaces = ['neutron-plugin-api']
+
+ def __call__(self):
+ self.neutron_defaults = {
+ 'l2_population': {
+ 'rel_key': 'l2-population',
+ 'default': False,
+ },
+ 'overlay_network_type': {
+ 'rel_key': 'overlay-network-type',
+ 'default': 'gre',
+ },
+ 'neutron_security_groups': {
+ 'rel_key': 'neutron-security-groups',
+ 'default': False,
+ },
+ 'network_device_mtu': {
+ 'rel_key': 'network-device-mtu',
+ 'default': None,
+ },
+ 'enable_dvr': {
+ 'rel_key': 'enable-dvr',
+ 'default': False,
+ },
+ 'enable_l3ha': {
+ 'rel_key': 'enable-l3ha',
+ 'default': False,
+ },
+ }
+ ctxt = self.get_neutron_options({})
+ for rid in relation_ids('neutron-plugin-api'):
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ if 'l2-population' in rdata:
+ ctxt.update(self.get_neutron_options(rdata))
+
+ return ctxt
+
+ def get_neutron_options(self, rdata):
+ settings = {}
+ for nkey in self.neutron_defaults.keys():
+ defv = self.neutron_defaults[nkey]['default']
+ rkey = self.neutron_defaults[nkey]['rel_key']
+ if rkey in rdata.keys():
+ if type(defv) is bool:
+ settings[nkey] = bool_from_string(rdata[rkey])
+ else:
+ settings[nkey] = rdata[rkey]
+ else:
+ settings[nkey] = defv
+ return settings
+
+
+class ExternalPortContext(NeutronPortContext):
+
+ def __call__(self):
+ ctxt = {}
+ ports = config('ext-port')
+ if ports:
+ ports = [p.strip() for p in ports.split()]
+ ports = self.resolve_ports(ports)
+ if ports:
+ ctxt = {"ext_port": ports[0]}
+ napi_settings = NeutronAPIContext()()
+ mtu = napi_settings.get('network_device_mtu')
+ if mtu:
+ ctxt['ext_port_mtu'] = mtu
+
+ return ctxt
+
+
+class DataPortContext(NeutronPortContext):
+
+ def __call__(self):
+ ports = config('data-port')
+ if ports:
+ # Map of {port/mac:bridge}
+ portmap = parse_data_port_mappings(ports)
+ ports = portmap.keys()
+ # Resolve provided ports or mac addresses and filter out those
+ # already attached to a bridge.
+ resolved = self.resolve_ports(ports)
+ # FIXME: is this necessary?
+ normalized = {get_nic_hwaddr(port): port for port in resolved
+ if port not in ports}
+ normalized.update({port: port for port in resolved
+ if port in ports})
+ if resolved:
+ return {bridge: normalized[port] for port, bridge in
+ six.iteritems(portmap) if port in normalized.keys()}
+
+ return None
+
+
+class PhyNICMTUContext(DataPortContext):
+
+ def __call__(self):
+ ctxt = {}
+ mappings = super(PhyNICMTUContext, self).__call__()
+ if mappings and mappings.values():
+ ports = mappings.values()
+ napi_settings = NeutronAPIContext()()
+ mtu = napi_settings.get('network_device_mtu')
+ if mtu:
+ ctxt["devs"] = '\\n'.join(ports)
+ ctxt['mtu'] = mtu
+
+ return ctxt
+
+
+class NetworkServiceContext(OSContextGenerator):
+
+ def __init__(self, rel_name='quantum-network-service'):
+ self.rel_name = rel_name
+ self.interfaces = [rel_name]
+
+ def __call__(self):
+ for rid in relation_ids(self.rel_name):
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ ctxt = {
+ 'keystone_host': rdata.get('keystone_host'),
+ 'service_port': rdata.get('service_port'),
+ 'auth_port': rdata.get('auth_port'),
+ 'service_tenant': rdata.get('service_tenant'),
+ 'service_username': rdata.get('service_username'),
+ 'service_password': rdata.get('service_password'),
+ 'quantum_host': rdata.get('quantum_host'),
+ 'quantum_port': rdata.get('quantum_port'),
+ 'quantum_url': rdata.get('quantum_url'),
+ 'region': rdata.get('region'),
+ 'service_protocol':
+ rdata.get('service_protocol') or 'http',
+ 'auth_protocol':
+ rdata.get('auth_protocol') or 'http',
+ }
+ if self.context_complete(ctxt):
+ return ctxt
+ return {}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 0000000..7587679
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
new file mode 100755
index 0000000..eb8527f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+CRITICAL=0
+NOTACTIVE=''
+LOGFILE=/var/log/nagios/check_haproxy.log
+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+
+for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
+do
+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
+ if [ $? != 0 ]; then
+ date >> $LOGFILE
+ echo $output >> $LOGFILE
+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
+ CRITICAL=1
+ NOTACTIVE="${NOTACTIVE} $appserver"
+ fi
+done
+
+if [ $CRITICAL = 1 ]; then
+ echo "CRITICAL:${NOTACTIVE}"
+ exit 2
+fi
+
+echo "OK: All haproxy instances looking good"
+exit 0
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
new file mode 100755
index 0000000..3ebb532
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+# These should be config options at some stage
+CURRQthrsh=0
+MAXQthrsh=100
+
+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+
+HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
+
+for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
+do
+ CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
+ MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
+
+ if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
+ echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
+ exit 2
+ fi
+done
+
+echo "OK: All haproxy queue depths looking good"
+exit 0
+
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py
new file mode 100644
index 0000000..3dca6dc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py
@@ -0,0 +1,151 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from charmhelpers.core.hookenv import (
+ config,
+ unit_get,
+ service_name,
+)
+from charmhelpers.contrib.network.ip import (
+ get_address_in_network,
+ is_address_in_network,
+ is_ipv6,
+ get_ipv6_addr,
+)
+from charmhelpers.contrib.hahelpers.cluster import is_clustered
+
+PUBLIC = 'public'
+INTERNAL = 'int'
+ADMIN = 'admin'
+
+ADDRESS_MAP = {
+ PUBLIC: {
+ 'config': 'os-public-network',
+ 'fallback': 'public-address',
+ 'override': 'os-public-hostname',
+ },
+ INTERNAL: {
+ 'config': 'os-internal-network',
+ 'fallback': 'private-address',
+ 'override': 'os-internal-hostname',
+ },
+ ADMIN: {
+ 'config': 'os-admin-network',
+ 'fallback': 'private-address',
+ 'override': 'os-admin-hostname',
+ }
+}
+
+
+def canonical_url(configs, endpoint_type=PUBLIC):
+ """Returns the correct HTTP URL to this host given the state of HTTPS
+ configuration, hacluster and charm configuration.
+
+ :param configs: OSTemplateRenderer config templating object to inspect
+ for a complete https context.
+ :param endpoint_type: str endpoint type to resolve.
+ :param returns: str base URL for services on the current service unit.
+ """
+ scheme = _get_scheme(configs)
+
+ address = resolve_address(endpoint_type)
+ if is_ipv6(address):
+ address = "[{}]".format(address)
+
+ return '%s://%s' % (scheme, address)
+
+
+def _get_scheme(configs):
+ """Returns the scheme to use for the url (either http or https)
+ depending upon whether https is in the configs value.
+
+ :param configs: OSTemplateRenderer config templating object to inspect
+ for a complete https context.
+ :returns: either 'http' or 'https' depending on whether https is
+ configured within the configs context.
+ """
+ scheme = 'http'
+ if configs and 'https' in configs.complete_contexts():
+ scheme = 'https'
+ return scheme
+
+
+def _get_address_override(endpoint_type=PUBLIC):
+ """Returns any address overrides that the user has defined based on the
+ endpoint type.
+
+ Note: this function allows for the service name to be inserted into the
+ address if the user specifies {service_name}.somehost.org.
+
+ :param endpoint_type: the type of endpoint to retrieve the override
+ value for.
+ :returns: any endpoint address or hostname that the user has overridden
+ or None if an override is not present.
+ """
+ override_key = ADDRESS_MAP[endpoint_type]['override']
+ addr_override = config(override_key)
+ if not addr_override:
+ return None
+ else:
+ return addr_override.format(service_name=service_name())
+
+
+def resolve_address(endpoint_type=PUBLIC):
+ """Return unit address depending on net config.
+
+ If unit is clustered with vip(s) and has net splits defined, return vip on
+ correct network. If clustered with no nets defined, return primary vip.
+
+ If not clustered, return unit address ensuring address is on configured net
+ split if one is configured.
+
+ :param endpoint_type: Network endpoint type
+ """
+ resolved_address = _get_address_override(endpoint_type)
+ if resolved_address:
+ return resolved_address
+
+ vips = config('vip')
+ if vips:
+ vips = vips.split()
+
+ net_type = ADDRESS_MAP[endpoint_type]['config']
+ net_addr = config(net_type)
+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+ clustered = is_clustered()
+ if clustered:
+ if not net_addr:
+ # If no net-splits defined, we expect a single vip
+ resolved_address = vips[0]
+ else:
+ for vip in vips:
+ if is_address_in_network(net_addr, vip):
+ resolved_address = vip
+ break
+ else:
+ if config('prefer-ipv6'):
+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
+ else:
+ fallback_addr = unit_get(net_fallback)
+
+ resolved_address = get_address_in_network(net_addr, fallback_addr)
+
+ if resolved_address is None:
+ raise ValueError("Unable to resolve a suitable IP address based on "
+ "charm state and configuration. (net_type=%s, "
+ "clustered=%s)" % (net_type, clustered))
+
+ return resolved_address
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py
new file mode 100644
index 0000000..55b2037
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -0,0 +1,356 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Various utilities for dealing with Neutron and the renaming from Quantum.
+
+import six
+from subprocess import check_output
+
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ ERROR,
+)
+
+from charmhelpers.contrib.openstack.utils import os_release
+
+
+def headers_package():
+ """Ensures correct linux-headers for running kernel are installed,
+ for building DKMS package"""
+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+ return 'linux-headers-%s' % kver
+
+QUANTUM_CONF_DIR = '/etc/quantum'
+
+
+def kernel_version():
+ """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
+ kver = kver.split('.')
+ return (int(kver[0]), int(kver[1]))
+
+
+def determine_dkms_package():
+ """ Determine which DKMS package should be used based on kernel version """
+ # NOTE: 3.13 kernels have support for GRE and VXLAN native
+ if kernel_version() >= (3, 13):
+ return []
+ else:
+ return ['openvswitch-datapath-dkms']
+
+
+# legacy
+
+
+def quantum_plugins():
+ from charmhelpers.contrib.openstack import context
+ return {
+ 'ovs': {
+ 'config': '/etc/quantum/plugins/openvswitch/'
+ 'ovs_quantum_plugin.ini',
+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
+ 'OVSQuantumPluginV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=QUANTUM_CONF_DIR)],
+ 'services': ['quantum-plugin-openvswitch-agent'],
+ 'packages': [[headers_package()] + determine_dkms_package(),
+ ['quantum-plugin-openvswitch-agent']],
+ 'server_packages': ['quantum-server',
+ 'quantum-plugin-openvswitch'],
+ 'server_services': ['quantum-server']
+ },
+ 'nvp': {
+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
+ 'QuantumPlugin.NvpPluginV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=QUANTUM_CONF_DIR)],
+ 'services': [],
+ 'packages': [],
+ 'server_packages': ['quantum-server',
+ 'quantum-plugin-nicira'],
+ 'server_services': ['quantum-server']
+ }
+ }
+
+NEUTRON_CONF_DIR = '/etc/neutron'
+
+
+def neutron_plugins():
+ from charmhelpers.contrib.openstack import context
+ release = os_release('nova-common')
+ plugins = {
+ 'ovs': {
+ 'config': '/etc/neutron/plugins/openvswitch/'
+ 'ovs_neutron_plugin.ini',
+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
+ 'OVSNeutronPluginV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': ['neutron-plugin-openvswitch-agent'],
+ 'packages': [[headers_package()] + determine_dkms_package(),
+ ['neutron-plugin-openvswitch-agent']],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-openvswitch'],
+ 'server_services': ['neutron-server']
+ },
+ 'nvp': {
+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
+ 'NeutronPlugin.NvpPluginV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-nicira'],
+ 'server_services': ['neutron-server']
+ },
+ 'nsx': {
+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
+ 'driver': 'vmware',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-vmware'],
+ 'server_services': ['neutron-server']
+ },
+ 'n1kv': {
+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [[headers_package()] + determine_dkms_package(),
+ ['neutron-plugin-cisco']],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-cisco'],
+ 'server_services': ['neutron-server']
+ },
+ 'Calico': {
+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': ['calico-felix',
+ 'bird',
+ 'neutron-dhcp-agent',
+ 'nova-api-metadata',
+ 'etcd'],
+ 'packages': [[headers_package()] + determine_dkms_package(),
+ ['calico-compute',
+ 'bird',
+ 'neutron-dhcp-agent',
+ 'nova-api-metadata',
+ 'etcd']],
+ 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
+ 'server_services': ['neutron-server', 'etcd']
+ },
+ 'vsp': {
+ 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
+ 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
+ 'contexts': [
+ context.SharedDBContext(user=config('neutron-database-user'),
+ database=config('neutron-database'),
+ relation_prefix='neutron',
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [],
+ 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
+ 'server_services': ['neutron-server']
+ },
+ 'plumgrid': {
+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
+ 'contexts': [
+ context.SharedDBContext(user=config('database-user'),
+ database=config('database'),
+ ssl_dir=NEUTRON_CONF_DIR)],
+ 'services': [],
+ 'packages': [['plumgrid-lxc'],
+ ['iovisor-dkms']],
+ 'server_packages': ['neutron-server',
+ 'neutron-plugin-plumgrid'],
+ 'server_services': ['neutron-server']
+ }
+ }
+ if release >= 'icehouse':
+ # NOTE: patch in ml2 plugin for icehouse onwards
+ plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+ plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+ plugins['ovs']['server_packages'] = ['neutron-server',
+ 'neutron-plugin-ml2']
+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
+ plugins['nvp'] = plugins['nsx']
+ return plugins
+
+
+def neutron_plugin_attribute(plugin, attr, net_manager=None):
+ manager = net_manager or network_manager()
+ if manager == 'quantum':
+ plugins = quantum_plugins()
+ elif manager == 'neutron':
+ plugins = neutron_plugins()
+ else:
+ log("Network manager '%s' does not support plugins." % (manager),
+ level=ERROR)
+ raise Exception
+
+ try:
+ _plugin = plugins[plugin]
+ except KeyError:
+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
+ raise Exception
+
+ try:
+ return _plugin[attr]
+ except KeyError:
+ return None
+
+
+def network_manager():
+ '''
+ Deals with the renaming of Quantum to Neutron in H and any situations
+ that require compatibility (e.g., deploying H with network-manager=quantum,
+ upgrading from G).
+ '''
+ release = os_release('nova-common')
+ manager = config('network-manager').lower()
+
+ if manager not in ['quantum', 'neutron']:
+ return manager
+
+ if release in ['essex']:
+ # E does not support neutron
+ log('Neutron networking not supported in Essex.', level=ERROR)
+ raise Exception
+ elif release in ['folsom', 'grizzly']:
+ # neutron is named quantum in F and G
+ return 'quantum'
+ else:
+ # ensure accurate naming for all releases post-H
+ return 'neutron'
+
+
+def parse_mappings(mappings, key_rvalue=False):
+ """By default mappings are lvalue keyed.
+
+ If key_rvalue is True, the mapping will be reversed to allow multiple
+ configs for the same lvalue.
+ """
+ parsed = {}
+ if mappings:
+ mappings = mappings.split()
+ for m in mappings:
+ p = m.partition(':')
+
+ if key_rvalue:
+ key_index = 2
+ val_index = 0
+ # if there is no rvalue skip to next
+ if not p[1]:
+ continue
+ else:
+ key_index = 0
+ val_index = 2
+
+ key = p[key_index].strip()
+ parsed[key] = p[val_index].strip()
+
+ return parsed
+
+
+def parse_bridge_mappings(mappings):
+ """Parse bridge mappings.
+
+ Mappings must be a space-delimited list of provider:bridge mappings.
+
+ Returns dict of the form {provider:bridge}.
+ """
+ return parse_mappings(mappings)
+
+
+def parse_data_port_mappings(mappings, default_bridge='br-data'):
+ """Parse data port mappings.
+
+ Mappings must be a space-delimited list of port:bridge mappings.
+
+ Returns dict of the form {port:bridge} where port may be an mac address or
+ interface name.
+ """
+
+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
+ # proposed for <port> since it may be a mac address which will differ
+ # across units this allowing first-known-good to be chosen.
+ _mappings = parse_mappings(mappings, key_rvalue=True)
+ if not _mappings or list(_mappings.values()) == ['']:
+ if not mappings:
+ return {}
+
+ # For backwards-compatibility we need to support port-only provided in
+ # config.
+ _mappings = {mappings.split()[0]: default_bridge}
+
+ ports = _mappings.keys()
+ if len(set(ports)) != len(ports):
+ raise Exception("It is not allowed to have the same port configured "
+ "on more than one bridge")
+
+ return _mappings
+
+
+def parse_vlan_range_mappings(mappings):
+ """Parse vlan range mappings.
+
+ Mappings must be a space-delimited list of provider:start:end mappings.
+
+ The start:end range is optional and may be omitted.
+
+ Returns dict of the form {provider: (start, end)}.
+ """
+ _mappings = parse_mappings(mappings)
+ if not _mappings:
+ return {}
+
+ mappings = {}
+ for p, r in six.iteritems(_mappings):
+ mappings[p] = tuple(r.split(':'))
+
+ return mappings
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py
new file mode 100644
index 0000000..7587679
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
new file mode 100644
index 0000000..b99851c
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -0,0 +1,15 @@
+###############################################################################
+# [ WARNING ]
+# cinder configuration file maintained by Juju
+# local changes may be overwritten.
+###############################################################################
+[global]
+{% if auth -%}
+auth_supported = {{ auth }}
+keyring = /etc/ceph/$cluster.$name.keyring
+mon host = {{ mon_hosts }}
+{% endif -%}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
+
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart
new file mode 100644
index 0000000..4bed404
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart
@@ -0,0 +1,17 @@
+description "{{ service_description }}"
+author "Juju {{ service_name }} Charm <juju@localhost>"
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+respawn
+
+exec start-stop-daemon --start --chuid {{ user_name }} \
+ --chdir {{ start_dir }} --name {{ process_name }} \
+ --exec {{ executable_name }} -- \
+ {% for config_file in config_files -%}
+ --config-file={{ config_file }} \
+ {% endfor -%}
+ {% if log_file -%}
+ --log-file={{ log_file }}
+ {% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
new file mode 100644
index 0000000..ad875f1
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -0,0 +1,58 @@
+global
+ log {{ local_host }} local0
+ log {{ local_host }} local1 notice
+ maxconn 20000
+ user haproxy
+ group haproxy
+ spread-checks 0
+
+defaults
+ log global
+ mode tcp
+ option tcplog
+ option dontlognull
+ retries 3
+ timeout queue 1000
+ timeout connect 1000
+{% if haproxy_client_timeout -%}
+ timeout client {{ haproxy_client_timeout }}
+{% else -%}
+ timeout client 30000
+{% endif -%}
+
+{% if haproxy_server_timeout -%}
+ timeout server {{ haproxy_server_timeout }}
+{% else -%}
+ timeout server 30000
+{% endif -%}
+
+listen stats {{ stat_port }}
+ mode http
+ stats enable
+ stats hide-version
+ stats realm Haproxy\ Statistics
+ stats uri /
+ stats auth admin:password
+
+{% if frontends -%}
+{% for service, ports in service_ports.items() -%}
+frontend tcp-in_{{ service }}
+ bind *:{{ ports[0] }}
+ {% if ipv6 -%}
+ bind :::{{ ports[0] }}
+ {% endif -%}
+ {% for frontend in frontends -%}
+ acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
+ use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
+ {% endfor -%}
+ default_backend {{ service }}_{{ default_backend }}
+
+{% for frontend in frontends -%}
+backend {{ service }}_{{ frontend }}
+ balance leastconn
+ {% for unit, address in frontends[frontend]['backends'].items() -%}
+ server {{ unit }} {{ address }}:{{ ports[1] }} check
+ {% endfor %}
+{% endfor -%}
+{% endfor -%}
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 0000000..ce28fa3
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,24 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+ ServerName {{ endpoint }}
+ SSLEngine on
+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+ ProxyPass / http://localhost:{{ int }}/
+ ProxyPassReverse / http://localhost:{{ int }}/
+ ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+ Order deny,allow
+ Allow from all
+</Proxy>
+<Location />
+ Order allow,deny
+ Allow from all
+</Location>
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
new file mode 100644
index 0000000..ce28fa3
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
@@ -0,0 +1,24 @@
+{% if endpoints -%}
+{% for ext_port in ext_ports -%}
+Listen {{ ext_port }}
+{% endfor -%}
+{% for address, endpoint, ext, int in endpoints -%}
+<VirtualHost {{ address }}:{{ ext }}>
+ ServerName {{ endpoint }}
+ SSLEngine on
+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
+ ProxyPass / http://localhost:{{ int }}/
+ ProxyPassReverse / http://localhost:{{ int }}/
+ ProxyPreserveHost on
+</VirtualHost>
+{% endfor -%}
+<Proxy *>
+ Order deny,allow
+ Allow from all
+</Proxy>
+<Location />
+ Order allow,deny
+ Allow from all
+</Location>
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
new file mode 100644
index 0000000..2a37edd
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
@@ -0,0 +1,9 @@
+{% if auth_host -%}
+[keystone_authtoken]
+identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
+admin_tenant_name = {{ admin_tenant_name }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+signing_dir = {{ signing_dir }}
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
new file mode 100644
index 0000000..b444c9c
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
@@ -0,0 +1,22 @@
+{% if rabbitmq_host or rabbitmq_hosts -%}
+[oslo_messaging_rabbit]
+rabbit_userid = {{ rabbitmq_user }}
+rabbit_virtual_host = {{ rabbitmq_virtual_host }}
+rabbit_password = {{ rabbitmq_password }}
+{% if rabbitmq_hosts -%}
+rabbit_hosts = {{ rabbitmq_hosts }}
+{% if rabbitmq_ha_queues -%}
+rabbit_ha_queues = True
+rabbit_durable_queues = False
+{% endif -%}
+{% else -%}
+rabbit_host = {{ rabbitmq_host }}
+{% endif -%}
+{% if rabbit_ssl_port -%}
+rabbit_use_ssl = True
+rabbit_port = {{ rabbit_ssl_port }}
+{% if rabbit_ssl_ca -%}
+kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
+{% endif -%}
+{% endif -%}
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
new file mode 100644
index 0000000..95f1a76
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
@@ -0,0 +1,14 @@
+{% if zmq_host -%}
+# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
+rpc_backend = zmq
+rpc_zmq_host = {{ zmq_host }}
+{% if zmq_redis_address -%}
+rpc_zmq_matchmaker = redis
+matchmaker_heartbeat_freq = 15
+matchmaker_heartbeat_ttl = 30
+[matchmaker_redis]
+host = {{ zmq_redis_address }}
+{% else -%}
+rpc_zmq_matchmaker = ring
+{% endif -%}
+{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 0000000..e5e3cb1
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,323 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ log,
+ ERROR,
+ INFO
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+ apt_update(fatal=True)
+ apt_install('python-jinja2', fatal=True)
+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+ pass
+
+
+def get_loader(templates_dir, os_release):
+ """
+ Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If a release's template directory
+    is missing at templates_dir, it will be omitted from the loader.
+ templates_dir is added to the bottom of the search list as a base
+ loading dir.
+
+ A charm may also ship a templates dir with this module
+ and it will be appended to the bottom of the search list, eg::
+
+ hooks/charmhelpers/contrib/openstack/templates
+
+ :param templates_dir (str): Base template directory containing release
+ sub-directories.
+ :param os_release (str): OpenStack release codename to construct template
+ loader.
+ :returns: jinja2.ChoiceLoader constructed with a list of
+              jinja2.FileSystemLoaders, ordered in descending
+ order by OpenStack release.
+ """
+ tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+ if not os.path.isdir(templates_dir):
+ log('Templates directory not found @ %s.' % templates_dir,
+ level=ERROR)
+ raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+ # shipped with the helper.
+ loaders = [FileSystemLoader(templates_dir)]
+ helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+ if os.path.isdir(helper_templates):
+ loaders.append(FileSystemLoader(helper_templates))
+
+ for rel, tmpl_dir in tmpl_dirs:
+ if os.path.isdir(tmpl_dir):
+ loaders.insert(0, FileSystemLoader(tmpl_dir))
+ if rel == os_release:
+ break
+ log('Creating choice loader with dirs: %s' %
+ [l.searchpath for l in loaders], level=INFO)
+ return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+ """
+ Associates a config file template with a list of context generators.
+ Responsible for constructing a template context based on those generators.
+ """
+ def __init__(self, config_file, contexts):
+ self.config_file = config_file
+
+ if hasattr(contexts, '__call__'):
+ self.contexts = [contexts]
+ else:
+ self.contexts = contexts
+
+ self._complete_contexts = []
+
+ def context(self):
+ ctxt = {}
+ for context in self.contexts:
+ _ctxt = context()
+ if _ctxt:
+ ctxt.update(_ctxt)
+ # track interfaces for every complete context.
+ [self._complete_contexts.append(interface)
+ for interface in context.interfaces
+ if interface not in self._complete_contexts]
+ return ctxt
+
+ def complete_contexts(self):
+ '''
+ Return a list of interfaces that have satisfied contexts.
+ '''
+ if self._complete_contexts:
+ return self._complete_contexts
+ self.context()
+ return self._complete_contexts
+
+
+class OSConfigRenderer(object):
+ """
+ This class provides a common templating system to be used by OpenStack
+ charms. It is intended to help charms share common code and templates,
+ and ease the burden of managing config templates across multiple OpenStack
+ releases.
+
+ Basic usage::
+
+        # import some common context generators from charmhelpers
+ from charmhelpers.contrib.openstack import context
+
+ # Create a renderer object for a specific OS release.
+ configs = OSConfigRenderer(templates_dir='/tmp/templates',
+ openstack_release='folsom')
+ # register some config files with context generators.
+ configs.register(config_file='/etc/nova/nova.conf',
+ contexts=[context.SharedDBContext(),
+ context.AMQPContext()])
+ configs.register(config_file='/etc/nova/api-paste.ini',
+ contexts=[context.IdentityServiceContext()])
+ configs.register(config_file='/etc/haproxy/haproxy.conf',
+ contexts=[context.HAProxyContext()])
+ # write out a single config
+ configs.write('/etc/nova/nova.conf')
+ # write out all registered configs
+ configs.write_all()
+
+ **OpenStack Releases and template loading**
+
+ When the object is instantiated, it is associated with a specific OS
+ release. This dictates how the template loader will be constructed.
+
+ The constructed loader attempts to load the template from several places
+ in the following order:
+ - from the most recent OS release-specific template dir (if one exists)
+ - the base templates_dir
+ - a template directory shipped in the charm with this helper file.
+
+ For the example above, '/tmp/templates' contains the following structure::
+
+ /tmp/templates/nova.conf
+ /tmp/templates/api-paste.ini
+ /tmp/templates/grizzly/api-paste.ini
+ /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+ the grizzly directory for nova.conf, then the templates dir.
+
+ When writing api-paste.ini, it will find the template in the grizzly
+ directory.
+
+ If the object were created with folsom, it would fall back to the
+ base templates dir for its api-paste.ini template.
+
+ This system should help manage changes in config files through
+ openstack releases, allowing charms to fall back to the most recently
+ updated config template for a given release
+
+ The haproxy.conf, since it is not shipped in the templates dir, will
+ be loaded from the module directory's template directory, eg
+ $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+ us to ship common templates (haproxy, apache) with the helpers.
+
+ **Context generators**
+
+ Context generators are used to generate template contexts during hook
+ execution. Doing so may require inspecting service relations, charm
+ config, etc. When registered, a config file is associated with a list
+ of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+ passed to the jinja2 template. See context.py for more info.
+ """
+ def __init__(self, templates_dir, openstack_release):
+ if not os.path.isdir(templates_dir):
+ log('Could not locate templates dir %s' % templates_dir,
+ level=ERROR)
+ raise OSConfigException
+
+ self.templates_dir = templates_dir
+ self.openstack_release = openstack_release
+ self.templates = {}
+ self._tmpl_env = None
+
+ if None in [Environment, ChoiceLoader, FileSystemLoader]:
+ # if this code is running, the object is created pre-install hook.
+ # jinja2 shouldn't get touched until the module is reloaded on next
+ # hook execution, with proper jinja2 bits successfully imported.
+ apt_install('python-jinja2')
+
+ def register(self, config_file, contexts):
+ """
+ Register a config file with a list of context generators to be called
+ during rendering.
+ """
+ self.templates[config_file] = OSConfigTemplate(config_file=config_file,
+ contexts=contexts)
+ log('Registered config file: %s' % config_file, level=INFO)
+
+ def _get_tmpl_env(self):
+ if not self._tmpl_env:
+ loader = get_loader(self.templates_dir, self.openstack_release)
+ self._tmpl_env = Environment(loader=loader)
+
+ def _get_template(self, template):
+ self._get_tmpl_env()
+ template = self._tmpl_env.get_template(template)
+ log('Loaded template from %s' % template.filename, level=INFO)
+ return template
+
+ def render(self, config_file):
+ if config_file not in self.templates:
+ log('Config not registered: %s' % config_file, level=ERROR)
+ raise OSConfigException
+ ctxt = self.templates[config_file].context()
+
+ _tmpl = os.path.basename(config_file)
+ try:
+ template = self._get_template(_tmpl)
+ except exceptions.TemplateNotFound:
+ # if no template is found with basename, try looking for it
+ # using a munged full path, eg:
+ # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
+ _tmpl = '_'.join(config_file.split('/')[1:])
+ try:
+ template = self._get_template(_tmpl)
+ except exceptions.TemplateNotFound as e:
+ log('Could not load template from %s by %s or %s.' %
+ (self.templates_dir, os.path.basename(config_file), _tmpl),
+ level=ERROR)
+ raise e
+
+ log('Rendering from template: %s' % _tmpl, level=INFO)
+ return template.render(ctxt)
+
+ def write(self, config_file):
+ """
+ Write a single config file, raises if config file is not registered.
+ """
+ if config_file not in self.templates:
+ log('Config not registered: %s' % config_file, level=ERROR)
+ raise OSConfigException
+
+ _out = self.render(config_file)
+
+ with open(config_file, 'wb') as out:
+ out.write(_out)
+
+ log('Wrote template %s.' % config_file, level=INFO)
+
+ def write_all(self):
+ """
+ Write out all registered config files.
+ """
+ [self.write(k) for k in six.iterkeys(self.templates)]
+
+ def set_release(self, openstack_release):
+ """
+ Resets the template environment and generates a new template loader
+        based on the new openstack release.
+ """
+ self._tmpl_env = None
+ self.openstack_release = openstack_release
+ self._get_tmpl_env()
+
+ def complete_contexts(self):
+ '''
+ Returns a list of context interfaces that yield a complete context.
+ '''
+ interfaces = []
+ [interfaces.extend(i.complete_contexts())
+ for i in six.itervalues(self.templates)]
+ return interfaces
+
+ def get_incomplete_context_data(self, interfaces):
+ '''
+ Return dictionary of relation status of interfaces and any missing
+ required context data. Example:
+ {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+ 'zeromq-configuration': {'related': False}}
+ '''
+ incomplete_context_data = {}
+
+ for i in six.itervalues(self.templates):
+ for context in i.contexts:
+ for interface in interfaces:
+ related = False
+ if interface in context.interfaces:
+ related = context.get_related()
+ missing_data = context.missing_data
+ if missing_data:
+ incomplete_context_data[interface] = {'missing_data': missing_data}
+ if related:
+ if incomplete_context_data.get(interface):
+ incomplete_context_data[interface].update({'related': True})
+ else:
+ incomplete_context_data[interface] = {'related': True}
+ else:
+ incomplete_context_data[interface] = {'related': False}
+ return incomplete_context_data
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py
new file mode 100644
index 0000000..24b998d
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py
@@ -0,0 +1,926 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict
+from functools import wraps
+
+import subprocess
+import json
+import os
+import sys
+import re
+
+import six
+import yaml
+
+from charmhelpers.contrib.network import ip
+
+from charmhelpers.core import (
+ unitdata,
+)
+
+from charmhelpers.core.hookenv import (
+ config,
+ log as juju_log,
+ charm_dir,
+ INFO,
+ relation_ids,
+ relation_set,
+ status_set,
+ hook_name
+)
+
+from charmhelpers.contrib.storage.linux.lvm import (
+ deactivate_lvm_volume_group,
+ is_lvm_physical_volume,
+ remove_lvm_physical_volume,
+)
+
+from charmhelpers.contrib.network.ip import (
+ get_ipv6_addr
+)
+
+from charmhelpers.contrib.python.packages import (
+ pip_create_virtualenv,
+ pip_install,
+)
+
+from charmhelpers.core.host import lsb_release, mounts, umount
+from charmhelpers.fetch import apt_install, apt_cache, install_remote
+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
+
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+
+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
+ 'restricted main multiverse universe')
+
+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
+ ('oneiric', 'diablo'),
+ ('precise', 'essex'),
+ ('quantal', 'folsom'),
+ ('raring', 'grizzly'),
+ ('saucy', 'havana'),
+ ('trusty', 'icehouse'),
+ ('utopic', 'juno'),
+ ('vivid', 'kilo'),
+ ('wily', 'liberty'),
+])
+
+
+OPENSTACK_CODENAMES = OrderedDict([
+ ('2011.2', 'diablo'),
+ ('2012.1', 'essex'),
+ ('2012.2', 'folsom'),
+ ('2013.1', 'grizzly'),
+ ('2013.2', 'havana'),
+ ('2014.1', 'icehouse'),
+ ('2014.2', 'juno'),
+ ('2015.1', 'kilo'),
+ ('2015.2', 'liberty'),
+])
+
+# The ugly duckling
+SWIFT_CODENAMES = OrderedDict([
+ ('1.4.3', 'diablo'),
+ ('1.4.8', 'essex'),
+ ('1.7.4', 'folsom'),
+ ('1.8.0', 'grizzly'),
+ ('1.7.7', 'grizzly'),
+ ('1.7.6', 'grizzly'),
+ ('1.10.0', 'havana'),
+ ('1.9.1', 'havana'),
+ ('1.9.0', 'havana'),
+ ('1.13.1', 'icehouse'),
+ ('1.13.0', 'icehouse'),
+ ('1.12.0', 'icehouse'),
+ ('1.11.0', 'icehouse'),
+ ('2.0.0', 'juno'),
+ ('2.1.0', 'juno'),
+ ('2.2.0', 'juno'),
+ ('2.2.1', 'kilo'),
+ ('2.2.2', 'kilo'),
+ ('2.3.0', 'liberty'),
+ ('2.4.0', 'liberty'),
+])
+
+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+ 'nova-common': OrderedDict([
+ ('12.0.0', 'liberty'),
+ ]),
+ 'neutron-common': OrderedDict([
+ ('7.0.0', 'liberty'),
+ ]),
+ 'cinder-common': OrderedDict([
+ ('7.0.0', 'liberty'),
+ ]),
+ 'keystone': OrderedDict([
+ ('8.0.0', 'liberty'),
+ ]),
+ 'horizon-common': OrderedDict([
+ ('8.0.0', 'liberty'),
+ ]),
+ 'ceilometer-common': OrderedDict([
+ ('5.0.0', 'liberty'),
+ ]),
+ 'heat-common': OrderedDict([
+ ('5.0.0', 'liberty'),
+ ]),
+ 'glance-common': OrderedDict([
+ ('11.0.0', 'liberty'),
+ ]),
+ 'openstack-dashboard': OrderedDict([
+ ('8.0.0', 'liberty'),
+ ]),
+}
+
+DEFAULT_LOOPBACK_SIZE = '5G'
+
+
+def error_out(msg):
+ juju_log("FATAL ERROR: %s" % msg, level='ERROR')
+ sys.exit(1)
+
+
+def get_os_codename_install_source(src):
+ '''Derive OpenStack release codename from a given installation source.'''
+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ rel = ''
+ if src is None:
+ return rel
+ if src in ['distro', 'distro-proposed']:
+ try:
+ rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
+ except KeyError:
+ e = 'Could not derive openstack release for '\
+ 'this Ubuntu release: %s' % ubuntu_rel
+ error_out(e)
+ return rel
+
+ if src.startswith('cloud:'):
+ ca_rel = src.split(':')[1]
+ ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+ return ca_rel
+
+ # Best guess match based on deb string provided
+ if src.startswith('deb') or src.startswith('ppa'):
+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
+ if v in src:
+ return v
+
+
+def get_os_version_install_source(src):
+ codename = get_os_codename_install_source(src)
+ return get_os_version_codename(codename)
+
+
+def get_os_codename_version(vers):
+ '''Determine OpenStack codename from version number.'''
+ try:
+ return OPENSTACK_CODENAMES[vers]
+ except KeyError:
+ e = 'Could not determine OpenStack codename for version %s' % vers
+ error_out(e)
+
+
+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
+ '''Determine OpenStack version number from codename.'''
+ for k, v in six.iteritems(version_map):
+ if v == codename:
+ return k
+ e = 'Could not derive OpenStack version for '\
+ 'codename: %s' % codename
+ error_out(e)
+
+
+def get_os_codename_package(package, fatal=True):
+ '''Derive OpenStack release codename from an installed package.'''
+ import apt_pkg as apt
+
+ cache = apt_cache()
+
+ try:
+ pkg = cache[package]
+ except:
+ if not fatal:
+ return None
+ # the package is unknown to the current apt cache.
+ e = 'Could not determine version of package with no installation '\
+ 'candidate: %s' % package
+ error_out(e)
+
+ if not pkg.current_ver:
+ if not fatal:
+ return None
+ # package is known, but no version is currently installed.
+ e = 'Could not determine version of uninstalled package: %s' % package
+ error_out(e)
+
+ vers = apt.upstream_version(pkg.current_ver.ver_str)
+ match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+ if match:
+ vers = match.group(0)
+
+ # >= Liberty independent project versions
+ if (package in PACKAGE_CODENAMES and
+ vers in PACKAGE_CODENAMES[package]):
+ return PACKAGE_CODENAMES[package][vers]
+ else:
+ # < Liberty co-ordinated project versions
+ try:
+ if 'swift' in pkg.name:
+ swift_vers = vers[:5]
+ if swift_vers not in SWIFT_CODENAMES:
+ # Deal with 1.10.0 upward
+ swift_vers = vers[:6]
+ return SWIFT_CODENAMES[swift_vers]
+ else:
+ vers = vers[:6]
+ return OPENSTACK_CODENAMES[vers]
+ except KeyError:
+ if not fatal:
+ return None
+ e = 'Could not determine OpenStack codename for version %s' % vers
+ error_out(e)
+
+
+def get_os_version_package(pkg, fatal=True):
+ '''Derive OpenStack version number from an installed package.'''
+ codename = get_os_codename_package(pkg, fatal=fatal)
+
+ if not codename:
+ return None
+
+ if 'swift' in pkg:
+ vers_map = SWIFT_CODENAMES
+ else:
+ vers_map = OPENSTACK_CODENAMES
+
+ for version, cname in six.iteritems(vers_map):
+ if cname == codename:
+ return version
+ # e = "Could not determine OpenStack version for package: %s" % pkg
+ # error_out(e)
+
+
+os_rel = None
+
+
+def os_release(package, base='essex'):
+ '''
+ Returns OpenStack release codename from a cached global.
+ If the codename can not be determined from either an installed package or
+ the installation source, the earliest release supported by the charm should
+ be returned.
+ '''
+ global os_rel
+ if os_rel:
+ return os_rel
+ os_rel = (get_os_codename_package(package, fatal=False) or
+ get_os_codename_install_source(config('openstack-origin')) or
+ base)
+ return os_rel
+
+
+def import_key(keyid):
+ cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
+ "--recv-keys %s" % keyid
+ try:
+ subprocess.check_call(cmd.split(' '))
+ except subprocess.CalledProcessError:
+ error_out("Error importing repo key %s" % keyid)
+
+
+def configure_installation_source(rel):
+ '''Configure apt installation source.'''
+ if rel == 'distro':
+ return
+ elif rel == 'distro-proposed':
+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
+ f.write(DISTRO_PROPOSED % ubuntu_rel)
+ elif rel[:4] == "ppa:":
+ src = rel
+ subprocess.check_call(["add-apt-repository", "-y", src])
+ elif rel[:3] == "deb":
+ l = len(rel.split('|'))
+ if l == 2:
+ src, key = rel.split('|')
+ juju_log("Importing PPA key from keyserver for %s" % src)
+ import_key(key)
+ elif l == 1:
+ src = rel
+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
+ f.write(src)
+ elif rel[:6] == 'cloud:':
+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ rel = rel.split(':')[1]
+ u_rel = rel.split('-')[0]
+ ca_rel = rel.split('-')[1]
+
+ if u_rel != ubuntu_rel:
+ e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
+ 'version (%s)' % (ca_rel, ubuntu_rel)
+ error_out(e)
+
+ if 'staging' in ca_rel:
+ # staging is just a regular PPA.
+ os_rel = ca_rel.split('/')[0]
+ ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
+ cmd = 'add-apt-repository -y %s' % ppa
+ subprocess.check_call(cmd.split(' '))
+ return
+
+ # map charm config options to actual archive pockets.
+ pockets = {
+ 'folsom': 'precise-updates/folsom',
+ 'folsom/updates': 'precise-updates/folsom',
+ 'folsom/proposed': 'precise-proposed/folsom',
+ 'grizzly': 'precise-updates/grizzly',
+ 'grizzly/updates': 'precise-updates/grizzly',
+ 'grizzly/proposed': 'precise-proposed/grizzly',
+ 'havana': 'precise-updates/havana',
+ 'havana/updates': 'precise-updates/havana',
+ 'havana/proposed': 'precise-proposed/havana',
+ 'icehouse': 'precise-updates/icehouse',
+ 'icehouse/updates': 'precise-updates/icehouse',
+ 'icehouse/proposed': 'precise-proposed/icehouse',
+ 'juno': 'trusty-updates/juno',
+ 'juno/updates': 'trusty-updates/juno',
+ 'juno/proposed': 'trusty-proposed/juno',
+ 'kilo': 'trusty-updates/kilo',
+ 'kilo/updates': 'trusty-updates/kilo',
+ 'kilo/proposed': 'trusty-proposed/kilo',
+ 'liberty': 'trusty-updates/liberty',
+ 'liberty/updates': 'trusty-updates/liberty',
+ 'liberty/proposed': 'trusty-proposed/liberty',
+ 'mitaka': 'trusty-updates/mitaka',
+ 'mitaka/updates': 'trusty-updates/mitaka',
+ 'mitaka/proposed': 'trusty-proposed/mitaka',
+ }
+
+ try:
+ pocket = pockets[ca_rel]
+ except KeyError:
+ e = 'Invalid Cloud Archive release specified: %s' % rel
+ error_out(e)
+
+ src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
+ apt_install('ubuntu-cloud-keyring', fatal=True)
+
+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
+ f.write(src)
+ else:
+ error_out("Invalid openstack-release specified: %s" % rel)
+
+
+def config_value_changed(option):
+ """
+ Determine if config value changed since last call to this function.
+ """
+ hook_data = unitdata.HookData()
+ with hook_data():
+ db = unitdata.kv()
+ current = config(option)
+ saved = db.get(option)
+ db.set(option, current)
+ if saved is None:
+ return False
+ return current != saved
+
+
+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
+ """
+ Write an rc file in the charm-delivered directory containing
+ exported environment variables provided by env_vars. Any charm scripts run
+ outside the juju hook environment can source this scriptrc to obtain
+ updated config information necessary to perform health checks or
+ service changes.
+ """
+ juju_rc_path = "%s/%s" % (charm_dir(), script_path)
+ if not os.path.exists(os.path.dirname(juju_rc_path)):
+ os.mkdir(os.path.dirname(juju_rc_path))
+ with open(juju_rc_path, 'wb') as rc_script:
+ rc_script.write(
+ "#!/bin/bash\n")
+ [rc_script.write('export %s=%s\n' % (u, p))
+ for u, p in six.iteritems(env_vars) if u != "script_path"]
+
+
+def openstack_upgrade_available(package):
+ """
+ Determines if an OpenStack upgrade is available from installation
+ source, based on version of installed package.
+
+ :param package: str: Name of installed package.
+
+    :returns: bool: Returns True if configured installation source offers
+ a newer version of package.
+
+ """
+
+ import apt_pkg as apt
+ src = config('openstack-origin')
+ cur_vers = get_os_version_package(package)
+ if "swift" in package:
+ codename = get_os_codename_install_source(src)
+ available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
+ else:
+ available_vers = get_os_version_install_source(src)
+ apt.init()
+ return apt.version_compare(available_vers, cur_vers) == 1
+
+
+def ensure_block_device(block_device):
+ '''
+ Confirm block_device, create as loopback if necessary.
+
+ :param block_device: str: Full path of block device to ensure.
+
+ :returns: str: Full path of ensured block device.
+ '''
+ _none = ['None', 'none', None]
+ if (block_device in _none):
+ error_out('prepare_storage(): Missing required input: block_device=%s.'
+ % block_device)
+
+ if block_device.startswith('/dev/'):
+ bdev = block_device
+ elif block_device.startswith('/'):
+ _bd = block_device.split('|')
+ if len(_bd) == 2:
+ bdev, size = _bd
+ else:
+ bdev = block_device
+ size = DEFAULT_LOOPBACK_SIZE
+ bdev = ensure_loopback_device(bdev, size)
+ else:
+ bdev = '/dev/%s' % block_device
+
+ if not is_block_device(bdev):
+ error_out('Failed to locate valid block device at %s' % bdev)
+
+ return bdev
+
+
+def clean_storage(block_device):
+ '''
+ Ensures a block device is clean. That is:
+ - unmounted
+ - any lvm volume groups are deactivated
+ - any lvm physical device signatures removed
+ - partition table wiped
+
+ :param block_device: str: Full path to block device to clean.
+ '''
+ for mp, d in mounts():
+ if d == block_device:
+ juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
+ (d, mp), level=INFO)
+ umount(mp, persist=True)
+
+ if is_lvm_physical_volume(block_device):
+ deactivate_lvm_volume_group(block_device)
+ remove_lvm_physical_volume(block_device)
+ else:
+ zap_disk(block_device)
+
+is_ip = ip.is_ip
+ns_query = ip.ns_query
+get_host_ip = ip.get_host_ip
+get_hostname = ip.get_hostname
+
+
+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
+ mm_map = {}
+ if os.path.isfile(mm_file):
+ with open(mm_file, 'r') as f:
+ mm_map = json.load(f)
+ return mm_map
+
+
+def sync_db_with_multi_ipv6_addresses(database, database_user,
+ relation_prefix=None):
+ hosts = get_ipv6_addr(dynamic_only=False)
+
+ kwargs = {'database': database,
+ 'username': database_user,
+ 'hostname': json.dumps(hosts)}
+
+ if relation_prefix:
+ for key in list(kwargs.keys()):
+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
+ del kwargs[key]
+
+ for rid in relation_ids('shared-db'):
+ relation_set(relation_id=rid, **kwargs)
+
+
+def os_requires_version(ostack_release, pkg):
+ """
+ Decorator for hook to specify minimum supported release
+ """
+ def wrap(f):
+ @wraps(f)
+ def wrapped_f(*args):
+ if os_release(pkg) < ostack_release:
+ raise Exception("This hook is not supported on releases"
+ " before %s" % ostack_release)
+ f(*args)
+ return wrapped_f
+ return wrap
+
+
+def git_install_requested():
+ """
+ Returns true if openstack-origin-git is specified.
+ """
+ return config('openstack-origin-git') is not None
+
+
+requirements_dir = None
+
+
+def _git_yaml_load(projects_yaml):
+ """
+ Load the specified yaml into a dictionary.
+ """
+ if not projects_yaml:
+ return None
+
+ return yaml.load(projects_yaml)
+
+
def git_clone_and_install(projects_yaml, core_project, depth=1):
    """
    Clone/install all specified OpenStack repositories.

    The expected format of projects_yaml is:

        repositories:
          - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
             branch: 'stable/icehouse'}
          - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}

        directory: /mnt/openstack-git
        http_proxy: squid-proxy-url
        https_proxy: squid-proxy-url

    The directory, http_proxy, and https_proxy keys are optional.

    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'
    http_proxy = None

    projects = _git_yaml_load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    # Snapshot the environment so proxy settings can be undone afterwards.
    old_environ = dict(os.environ)

    if 'http_proxy' in projects.keys():
        http_proxy = projects['http_proxy']
        os.environ['http_proxy'] = projects['http_proxy']
    if 'https_proxy' in projects.keys():
        os.environ['https_proxy'] = projects['https_proxy']

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))

    # Upgrade setuptools and pip from default virtualenv versions. The default
    # versions in trusty break master OpenStack branch deployments.
    for p in ['pip', 'setuptools']:
        pip_install(p, upgrade=True, proxy=http_proxy,
                    venv=os.path.join(parent_dir, 'venv'))

    # 'requirements' is validated to come first, so requirements_dir is set
    # before any other repo asks to sync against it.
    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        if p['name'] == 'requirements':
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=False)
            requirements_dir = repo_dir
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=True)

    # NOTE(review): this rebinds the os.environ *name* to a plain dict; it
    # does not restore the underlying C environment via putenv/unsetenv, and
    # the restore is skipped entirely if an exception occurs above — TODO
    # confirm whether a try/finally + environ.clear()/update() is wanted here.
    os.environ = old_environ
+
+
def _git_validate_projects_yaml(projects, core_project):
    """Validate the parsed projects yaml, aborting via error_out on error."""
    _git_ensure_key_exists('repositories', projects)

    # Every repository entry must name itself and say where it comes from.
    for project in projects['repositories']:
        for required_key in ('name', 'repository', 'branch'):
            _git_ensure_key_exists(required_key, project.keys())

    repositories = projects['repositories']
    if repositories[0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))

    if repositories[-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))
+
+
def _git_ensure_key_exists(key, keys):
    """Abort via error_out unless key is present in keys."""
    if key in keys:
        return
    error_out('openstack-origin-git key \'{}\' is missing'.format(key))
+
+
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
                                  update_requirements):
    """
    Clone and install a single git repository.

    :param repo: git URL of the repository.
    :param branch: branch to check out.
    :param depth: clone depth passed through to install_remote.
    :param parent_dir: directory the repo is cloned under; created if absent.
    :param http_proxy: optional proxy URL forwarded to pip.
    :param update_requirements: when True, sync this repo's requirements
        from the previously cloned global 'requirements' repo.
    :returns: path of the cloned repository.
    """
    dest_dir = os.path.join(parent_dir, os.path.basename(repo))

    if not os.path.exists(parent_dir):
        # Bug fix: the original message claimed the directory already
        # existed, but this branch runs precisely when it does NOT exist.
        juju_log('Directory does not exist at {}. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
                                  depth=depth)
    else:
        # Already cloned on a previous run; reuse it.
        repo_dir = dest_dir

    venv = os.path.join(parent_dir, 'venv')

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(venv, repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    if http_proxy:
        pip_install(repo_dir, proxy=http_proxy, venv=venv)
    else:
        pip_install(repo_dir, venv=venv)

    return repo_dir
+
+
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt, by running the
    requirements repo's update.py against package_dir.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    try:
        python = os.path.join(venv, 'bin/python')
        cmd = [python, 'update.py', package_dir]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            package = os.path.basename(package_dir)
            error_out("Error updating {} from "
                      "global-requirements.txt".format(package))
    finally:
        # Bug fix: always restore the working directory, even when update.py
        # fails — the original left the process chdir'd into reqs_dir.
        os.chdir(orig_dir)
+
+
def git_pip_venv_dir(projects_yaml):
    """
    Return the pip virtualenv path used for git-based installs.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')
    return os.path.join(parent_dir, 'venv')
+
+
def git_src_dir(projects_yaml, project):
    """
    Return the checkout directory of the named project, or None when the
    project is not listed in projects_yaml.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')

    for entry in projects['repositories']:
        if entry['name'] == project:
            return os.path.join(parent_dir,
                                os.path.basename(entry['repository']))

    return None
+
+
def git_yaml_value(projects_yaml, key):
    """
    Return the value stored under key in projects_yaml, or None if absent.
    """
    projects = _git_yaml_load(projects_yaml)
    return projects.get(key)
+
+
def os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Decorator: after the wrapped hook runs, refresh the unit's workload
    status from the (now updated) contexts.
    """
    def decorator(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            f(*args, **kwargs)
            # Contexts have been acted on; recompute and publish status.
            set_os_workload_status(configs, required_interfaces, charm_func)
        return wrapped_f
    return decorator
+
+
def set_os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Set workload status based on complete contexts.
    status-set missing or incomplete contexts
    and juju-log details of missing required data.
    charm_func is a charm specific function to run checking
    for charm specific requirements such as a VIP setting.

    Resulting states: 'blocked' when a required relation is missing or
    departing, 'waiting' when related but data is incomplete, otherwise
    'active' ("Unit is ready").
    """
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
    state = 'active'
    missing_relations = []
    incomplete_relations = []
    message = None
    charm_state = None
    charm_message = None

    for generic_interface in incomplete_rel_data.keys():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface in incomplete_rel_data[generic_interface]:
            if incomplete_rel_data[generic_interface][interface].get('related'):
                related_interface = interface
                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
        # No relation ID for the generic_interface
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            if generic_interface not in missing_relations:
                missing_relations.append(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case relation ID exists but departing
                if ('departed' in hook_name() or 'broken' in hook_name()) \
                        and related_interface in hook_name():
                    state = 'blocked'
                    if generic_interface not in missing_relations:
                        missing_relations.append(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface), "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has "
                             "no units in the relation."
                             "".format(generic_interface, related_interface), "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in incomplete_relations \
                    and generic_interface not in missing_relations:
                incomplete_relations.append(generic_interface)

    # Build the status message; 'blocked' dominates 'waiting'.
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    # Run charm specific checks
    if charm_func:
        # charm_func must return a (state, message) pair.
        charm_state, charm_message = charm_func(configs)
        if charm_state != 'active' and charm_state != 'unknown':
            # Keep whichever state is higher priority (see
            # workload_state_compare) and append the charm's message.
            state = workload_state_compare(state, charm_state)
            if message:
                message = "{} {}".format(message, charm_message)
            else:
                message = charm_message

    # Set to active if all requirements have been met
    if state == 'active':
        message = "Unit is ready"
        juju_log(message, "INFO")

    status_set(state, message)
+
+
def workload_state_compare(current_workload_state, workload_state):
    """Return whichever of the two workload states has higher priority.

    Unrecognised states are treated as 'unknown' (lowest priority).
    On a tie the second argument wins.
    """
    hierarchy = {'unknown': -1,
                 'active': 0,
                 'maintenance': 1,
                 'waiting': 2,
                 'blocked': 3,
                 }

    if workload_state not in hierarchy:
        workload_state = 'unknown'
    if current_workload_state not in hierarchy:
        current_workload_state = 'unknown'

    # Higher rank wins; equal ranks fall through to the new state.
    if hierarchy[current_workload_state] > hierarchy[workload_state]:
        return current_workload_state
    return workload_state
+
+
def incomplete_relation_data(configs, required_interfaces):
    """
    Check complete contexts against required_interfaces
    Return dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces, e.g.
        required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    A general interface is satisfied if ANY of its specific interfaces has
    a complete context.  For each unsatisfied general interface, the result
    maps its name to configs.get_incomplete_context_data() for its specific
    interfaces, e.g.
        {'database':
          {'pgsql-db': {'related': False},
           'shared-db': {'related': True}}}
    """
    complete = configs.complete_contexts()
    unsatisfied = [svc_type
                   for svc_type, interfaces in required_interfaces.items()
                   if not any(iface in complete for iface in interfaces)]
    return {svc_type: configs.get_incomplete_context_data(
                required_interfaces[svc_type])
            for svc_type in unsatisfied}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py
new file mode 100644
index 0000000..871cd6f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import print_function
+
+import atexit
+import sys
+
+from charmhelpers.contrib.python.rpdb import Rpdb
+from charmhelpers.core.hookenv import (
+ open_port,
+ close_port,
+ ERROR,
+ log
+)
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+DEFAULT_ADDR = "0.0.0.0"
+DEFAULT_PORT = 4444
+
+
def _error(message):
    # Record the message in the juju log at ERROR level.
    log(message, level=ERROR)
+
+
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
    """
    Set a trace point using the remote debugger.

    Opens the juju port, starts an Rpdb session bound to addr:port and
    breaks into the caller's frame.  The port is closed again at process
    exit via atexit.
    """
    atexit.register(close_port, port)
    try:
        log("Starting a remote python debugger session on %s:%s" % (addr,
                                                                    port))
        open_port(port)
        debugger = Rpdb(addr=addr, port=port)
        debugger.set_trace(sys._getframe().f_back)
    except Exception:
        # Bug fix: the original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be logged-and-ignored.
        _error("Cannot start a remote debug session on %s:%s" % (addr,
                                                                 port))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py
new file mode 100644
index 0000000..10b32e3
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import subprocess
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+try:
+ from pip import main as pip_execute
+except ImportError:
+ apt_update()
+ apt_install('python-pip')
+ from pip import main as pip_execute
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
def parse_options(given, available):
    """Yield '--key=value' flags for each truthy option listed in available.

    Options are emitted in sorted key order; falsy values and keys not in
    available are skipped.
    """
    for key, value in sorted(given.items()):
        if value and key in available:
            yield "--{0}={1}".format(key, value)
+
+
def pip_install_requirements(requirements, **options):
    """Install a requirements file.

    Recognised keyword options: proxy, src, log (passed through to pip).
    """
    command = ["install"]

    available_options = ('proxy', 'src', 'log', )
    for option in parse_options(options, available_options):
        command.append(option)

    # NOTE(review): "-r <path>" is passed to pip as a single argv token
    # rather than as two arguments — confirm pip's option parser accepts
    # the combined form before relying on this.
    command.append("-r {0}".format(requirements))
    log("Installing from file: {} with options: {}".format(requirements,
                                                           command))
    pip_execute(command)
+
+
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
    """Install one python package (or a list of them) via pip.

    When venv is given, the venv's own pip binary is invoked in a
    subprocess; otherwise the in-process pip main() is used.
    """
    if venv:
        command = [os.path.join(venv, 'bin/pip'), "install"]
    else:
        command = ["install"]

    command.extend(parse_options(options, ('proxy', 'src', 'log',
                                           'index-url', )))

    if upgrade:
        command.append('--upgrade')

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    if venv:
        subprocess.check_call(command)
    else:
        pip_execute(command)
+
+
def pip_uninstall(package, **options):
    """Uninstall one python package (or a list of them) via pip."""
    command = ["uninstall", "-q", "-y"]
    command.extend(parse_options(options, ('proxy', 'log', )))

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)
+
+
def pip_list():
    """Returns the list of current python installed packages
    """
    # Delegates to pip's own 'list' command (in-process main()).
    return pip_execute(["list"])
+
+
def pip_create_virtualenv(path=None):
    """Create an isolated Python environment.

    Defaults to <charm_dir>/venv when no path is given; an existing
    virtualenv at the target path is left untouched.
    """
    apt_install('python-virtualenv')

    venv_path = path if path else os.path.join(charm_dir(), 'venv')

    if not os.path.exists(venv_path):
        subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py
new file mode 100644
index 0000000..d503f88
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py
@@ -0,0 +1,58 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Remote Python Debugger (pdb wrapper)."""
+
+import pdb
+import socket
+import sys
+
+__author__ = "Bertrand Janin <b@janin.com>"
+__version__ = "0.1.3"
+
+
class Rpdb(pdb.Pdb):
    # A pdb that serves its prompt over a TCP socket instead of the local
    # terminal.  Construction BLOCKS until a client connects to addr:port.

    def __init__(self, addr="127.0.0.1", port=4444):
        """Initialize the socket and initialize pdb."""

        # Backup stdin and stdout before replacing them by the socket handle
        self.old_stdout = sys.stdout
        self.old_stdin = sys.stdin

        # Open a 'reusable' socket to let the webapp reload on the same port
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.skt.bind((addr, port))
        self.skt.listen(1)
        # Blocks here until a debugger client connects.
        (clientsocket, address) = self.skt.accept()
        # NOTE(review): mode 'rw' is a Python 2 file mode; Python 3's
        # socket.makefile() expects e.g. 'rw' -> invalid — confirm target
        # interpreter before reuse.
        handle = clientsocket.makefile('rw')
        pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
        # Redirect the process's stdio to the connected client.
        sys.stdout = sys.stdin = handle

    def shutdown(self):
        """Revert stdin and stdout, close the socket."""
        sys.stdout = self.old_stdout
        sys.stdin = self.old_stdin
        self.skt.close()
        self.set_continue()

    def do_continue(self, arg):
        """Stop all operation on ``continue``."""
        self.shutdown()
        return 1

    # All session-ending commands tear the session down the same way.
    do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py
new file mode 100644
index 0000000..c39fcbf
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
def current_version():
    """Current system python version (sys.version_info named tuple)."""
    version_info = sys.version_info
    return version_info
+
+
def current_version_string():
    """Current system python version as string major.minor.micro"""
    info = sys.version_info
    return "{0}.{1}.{2}".format(info.major, info.minor, info.micro)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py
new file mode 100644
index 0000000..83f264d
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -0,0 +1,657 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+# James Page <james.page@ubuntu.com>
+# Adam Gandelman <adamg@ubuntu.com>
+#
+
import errno
import json
import os
import shutil
import time
import uuid
+
+from subprocess import (
+ check_call,
+ check_output,
+ CalledProcessError,
+)
+from charmhelpers.core.hookenv import (
+ local_unit,
+ relation_get,
+ relation_ids,
+ relation_set,
+ related_units,
+ log,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+from charmhelpers.core.host import (
+ mount,
+ mounts,
+ service_start,
+ service_stop,
+ service_running,
+ umount,
+)
+from charmhelpers.fetch import (
+ apt_install,
+)
+
+from charmhelpers.core.kernel import modprobe
+
# Path templates for a ceph client's keyring and flat key file; the '{}'
# slot is the client/service name.
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'

# Minimal ceph.conf template rendered by configure().
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
+
+
def install():
    """Basic Ceph client installation."""
    # Ensure /etc/ceph exists before the package drops configuration there.
    ceph_dir = "/etc/ceph"
    if not os.path.exists(ceph_dir):
        os.mkdir(ceph_dir)

    apt_install('ceph-common', fatal=True)
+
+
def rbd_exists(service, pool, rbd_img):
    """Check to see if a RADOS block device exists.

    Returns False when 'rbd list' itself fails.
    """
    try:
        listing = check_output(['rbd', 'list', '--id',
                                service, '--pool', pool]).decode('UTF-8')
    except CalledProcessError:
        return False

    return rbd_img in listing
+
+
def create_rbd_image(service, pool, image, sizemb):
    """Create a new RADOS block device.

    :param sizemb: image size in megabytes (passed to 'rbd create --size').
    """
    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
           '--pool', pool]
    check_call(cmd)
+
+
def pool_exists(service, name):
    """Check to see if a RADOS pool already exists.

    Returns False when 'rados lspools' itself fails.
    """
    try:
        listing = check_output(['rados', '--id', service,
                                'lspools']).decode('UTF-8')
    except CalledProcessError:
        return False

    return name in listing
+
+
def _numeric_version(version):
    """Parse a dotted version string into a tuple of ints for comparison.

    Parsing stops at the first non-numeric component
    (e.g. '0.94rc1' -> (0,)).
    """
    parts = []
    for piece in version.split('.'):
        if not piece.isdigit():
            break
        parts.append(int(piece))
    return tuple(parts)


def get_osds(service):
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster, or None when the local ceph is too old to list them.
    """
    version = ceph_version()
    # Bug fix: compare numerically.  The original lexicographic string
    # comparison wrongly treats e.g. '0.100.0' as older than '0.56'.
    if version and _numeric_version(version) >= (0, 56):
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None
+
+
def create_pool(service, name, replicas=3):
    """Create a new RADOS pool with a PG count derived from the OSD count."""
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return

    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    # NOTE(james-page): fall back to 200 for older ceph versions
    # which don't support OSD query from the cli.
    pgnum = (len(osds) * 100 // replicas) if osds else 200

    check_call(['ceph', '--id', service, 'osd', 'pool', 'create', name,
                str(pgnum)])
    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
                str(replicas)])
+
+
def delete_pool(service, name):
    """Delete a RADOS pool from ceph.

    Irreversible: passes '--yes-i-really-really-mean-it' to ceph.
    """
    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
           '--yes-i-really-really-mean-it']
    check_call(cmd)
+
+
def _keyfile_path(service):
    # Flat key file path for the named client (see KEYFILE template).
    return KEYFILE.format(service)
+
+
def _keyring_path(service):
    # Keyring path for the named client (see KEYRING template).
    return KEYRING.format(service)
+
+
def create_keyring(service, key):
    """Create a new Ceph keyring containing key.

    No-op (with a warning) if the keyring already exists.
    """
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
        return

    check_call(['ceph-authtool', keyring, '--create-keyring',
                '--name=client.{}'.format(service),
                '--add-key={}'.format(key)])
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+
+
def delete_keyring(service):
    """Delete an existing Ceph keyring; warn and return if it is absent."""
    keyring = _keyring_path(service)
    if not os.path.exists(keyring):
        log('Keyring does not exist at %s' % keyring, level=WARNING)
        return

    os.remove(keyring)
    log('Deleted ring at %s.' % keyring, level=INFO)
+
+
def create_key_file(service, key):
    """Write key to the client's flat key file; no-op if it already exists."""
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        log('Keyfile exists at %s.' % keyfile, level=WARNING)
        return

    with open(keyfile, 'w') as key_file:
        key_file.write(key)

    log('Created new keyfile at %s.' % keyfile, level=INFO)
+
+
def get_ceph_nodes():
    """Query named relation 'ceph' to determine current nodes.

    Returns the private-address of every related unit.
    """
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids('ceph')
            for unit in related_units(r_id)]
+
+
def configure(service, key, auth, use_syslog):
    """Perform basic configuration of Ceph.

    Creates the keyring and key file, renders /etc/ceph/ceph.conf from the
    CEPH_CONF template with the current 'ceph' relation monitor hosts, and
    loads the rbd kernel module.
    """
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF.format(auth=auth,
                                         keyring=_keyring_path(service),
                                         mon_hosts=",".join(map(str, hosts)),
                                         use_syslog=use_syslog))
    modprobe('rbd')
+
+
def image_mapped(name):
    """Determine whether a RADOS block device is mapped locally.

    Returns False when 'rbd showmapped' itself fails.
    """
    try:
        mapped = check_output(['rbd', 'showmapped']).decode('UTF-8')
    except CalledProcessError:
        return False

    return name in mapped
+
+
def map_block_storage(service, pool, image):
    """Map a RADOS block device for local use.

    Authenticates with the client's flat key file (see _keyfile_path).
    """
    cmd = [
        'rbd',
        'map',
        '{}/{}'.format(pool, image),
        '--user',
        service,
        '--secret',
        _keyfile_path(service),
    ]
    check_call(cmd)
+
+
def filesystem_mounted(fs):
    """Determine whether a filesystem is already mounted.

    fs is matched against the mount points reported by mounts().
    """
    return any(mount_point == fs for mount_point, _device in mounts())
+
+
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    """Make a new filesystem on the specified block device.

    Waits up to timeout seconds (polling once per second) for the device
    node to appear before formatting it; raises IOError(ENOENT) if it
    never does.
    """
    count = 0
    # Bug fix: 'os.errno' is an undocumented accident of os importing errno
    # (removed in Python 3.7); use the errno module directly.
    e_noent = errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('Gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)

        log('Waiting for block device %s to appear' % blk_device,
            level=DEBUG)
        count += 1
        time.sleep(1)
    else:
        # while/else: runs when the device appeared before the timeout.
        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
+
+
def place_data_on_block_device(blk_device, data_src_dst):
    """Migrate data in data_src_dst to blk_device and then remount.

    The device is temporarily mounted at /mnt for the copy, then remounted
    over the original data location with ownership preserved.
    """
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, uid, gid)
+
+
def copy_files(src, dst, symlinks=False, ignore=None):
    """Copy the contents of src into dst.

    Directories are copied recursively via shutil.copytree (symlinks and
    ignore are forwarded); regular files via shutil.copy2.
    """
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        if os.path.isdir(source):
            shutil.copytree(source, target, symlinks, ignore)
        else:
            shutil.copy2(source, target)
+
+
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[],
                        replicas=3):
    """NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param sizemb: RBD image size in megabytes.
    :param replicas: replica count used if the pool has to be created.
    """
    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('Creating new pool {}.'.format(pool), level=INFO)
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    #      otherwise this hook will blow away existing data if its executed
    #      after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('Stopping services {} prior to migrating data.'
                    .format(svc), level=DEBUG)
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('Starting service {} after migrating data.'
                .format(svc), level=DEBUG)
            service_start(svc)
+
+
+def ensure_ceph_keyring(service, user=None, group=None):
+ """Ensures a ceph keyring is created for a named service and optionally
+ ensures user and group ownership.
+
+ Returns False if no ceph key is available in relation state.
+ """
+ key = None
+ for rid in relation_ids('ceph'):
+ for unit in related_units(rid):
+ key = relation_get('key', rid=rid, unit=unit)
+ if key:
+ break
+
+ if not key:
+ return False
+
+ create_keyring(service=service, key=key)
+ keyring = _keyring_path(service)
+ if user and group:
+ check_call(['chown', '%s.%s' % (user, group), keyring])
+
+ return True
+
+
+def ceph_version():
+ """Retrieve the local version of ceph."""
+ if os.path.exists('/usr/bin/ceph'):
+ cmd = ['ceph', '-v']
+ output = check_output(cmd).decode('US-ASCII')
+ output = output.split()
+ if len(output) > 3:
+ return output[2]
+ else:
+ return None
+ else:
+ return None
+
+
+class CephBrokerRq(object):
+ """Ceph broker request.
+
+ Multiple operations can be added to a request and sent to the Ceph broker
+ to be executed.
+
+ Request is json-encoded for sending over the wire.
+
+ The API is versioned and defaults to version 1.
+ """
+ def __init__(self, api_version=1, request_id=None):
+ self.api_version = api_version
+ if request_id:
+ self.request_id = request_id
+ else:
+ self.request_id = str(uuid.uuid1())
+ self.ops = []
+
+ def add_op_create_pool(self, name, replica_count=3):
+ self.ops.append({'op': 'create-pool', 'name': name,
+ 'replicas': replica_count})
+
+ def set_ops(self, ops):
+ """Set request ops to provided value.
+
+ Useful for injecting ops that come from a previous request
+ to allow comparisons to ensure validity.
+ """
+ self.ops = ops
+
+ @property
+ def request(self):
+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+ 'request-id': self.request_id})
+
+ def _ops_equal(self, other):
+ if len(self.ops) == len(other.ops):
+ for req_no in range(0, len(self.ops)):
+ for key in ['replicas', 'name', 'op']:
+ if self.ops[req_no][key] != other.ops[req_no][key]:
+ return False
+ else:
+ return False
+ return True
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
+ if self.api_version == other.api_version and \
+ self._ops_equal(other):
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class CephBrokerRsp(object):
+ """Ceph broker response.
+
+ Response is json-decoded and contents provided as methods/properties.
+
+ The API is versioned and defaults to version 1.
+ """
+
+ def __init__(self, encoded_rsp):
+ self.api_version = None
+ self.rsp = json.loads(encoded_rsp)
+
+ @property
+ def request_id(self):
+ return self.rsp.get('request-id')
+
+ @property
+ def exit_code(self):
+ return self.rsp.get('exit-code')
+
+ @property
+ def exit_msg(self):
+ return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+# send_request_if_needed,
+# is_request_complete,
+# CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+# rq = CephBrokerRq()
+# rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+# if is_request_complete(rq):
+# <Request complete actions>
+# else:
+# send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+# 'ceph:8': {
+# 'ceph/0': {
+# 'auth': 'cephx',
+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+# 'ceph-public-address': '10.5.44.103',
+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+# 'private-address': '10.5.44.103',
+# },
+# 'glance/0': {
+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+# '"ops": [{"replicas": 3, "name": "glance", '
+# '"op": "create-pool"}]}'),
+# 'private-address': '10.5.44.109',
+# },
+# }
+
+def get_previous_request(rid):
+ """Return the last ceph broker request sent on a given relation
+
+ @param rid: Relation id to query for request
+ """
+ request = None
+ broker_req = relation_get(attribute='broker_req', rid=rid,
+ unit=local_unit())
+ if broker_req:
+ request_data = json.loads(broker_req)
+ request = CephBrokerRq(api_version=request_data['api-version'],
+ request_id=request_data['request-id'])
+ request.set_ops(request_data['ops'])
+
+ return request
+
+
+def get_request_states(request):
+ """Return a dict of requests per relation id with their corresponding
+ completion state.
+
+ This allows a charm, which has a request for ceph, to see whether there is
+ an equivalent request already being processed and if so what state that
+ request is in.
+
+ @param request: A CephBrokerRq object
+ """
+ complete = []
+ requests = {}
+ for rid in relation_ids('ceph'):
+ complete = False
+ previous_request = get_previous_request(rid)
+ if request == previous_request:
+ sent = True
+ complete = is_request_complete_for_rid(previous_request, rid)
+ else:
+ sent = False
+ complete = False
+
+ requests[rid] = {
+ 'sent': sent,
+ 'complete': complete,
+ }
+
+ return requests
+
+
+def is_request_sent(request):
+ """Check to see if a functionally equivalent request has already been sent
+
+ Returns True if a similar request has been sent
+
+ @param request: A CephBrokerRq object
+ """
+ states = get_request_states(request)
+ for rid in states.keys():
+ if not states[rid]['sent']:
+ return False
+
+ return True
+
+
+def is_request_complete(request):
+ """Check to see if a functionally equivalent request has already been
+ completed
+
+ Returns True if a similar request has been completed
+
+ @param request: A CephBrokerRq object
+ """
+ states = get_request_states(request)
+ for rid in states.keys():
+ if not states[rid]['complete']:
+ return False
+
+ return True
+
+
+def is_request_complete_for_rid(request, rid):
+ """Check if a given request has been completed on the given relation
+
+ @param request: A CephBrokerRq object
+ @param rid: Relation ID
+ """
+ broker_key = get_broker_rsp_key()
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ if rdata.get(broker_key):
+ rsp = CephBrokerRsp(rdata.get(broker_key))
+ if rsp.request_id == request.request_id:
+ if not rsp.exit_code:
+ return True
+ else:
+ # The remote unit sent no reply targeted at this unit so either the
+ # remote ceph cluster does not support unit targeted replies or it
+ # has not processed our request yet.
+ if rdata.get('broker_rsp'):
+ request_data = json.loads(rdata['broker_rsp'])
+ if request_data.get('request-id'):
+ log('Ignoring legacy broker_rsp without unit key as remote '
+ 'service supports unit specific replies', level=DEBUG)
+ else:
+ log('Using legacy broker_rsp as remote service does not '
+ 'supports unit specific replies', level=DEBUG)
+ rsp = CephBrokerRsp(rdata['broker_rsp'])
+ if not rsp.exit_code:
+ return True
+
+ return False
+
+
+def get_broker_rsp_key():
+ """Return broker response key for this unit
+
+ This is the key that ceph is going to use to pass request status
+ information back to this unit
+ """
+ return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request):
+ """Send broker request if an equivalent request has not already been sent
+
+ @param request: A CephBrokerRq object
+ """
+ if is_request_sent(request):
+ log('Request already sent but not complete, not sending new request',
+ level=DEBUG)
+ else:
+ for rid in relation_ids('ceph'):
+ log('Sending request {}'.format(request.request_id), level=DEBUG)
+ relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 0000000..c296f09
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,78 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from subprocess import (
+ check_call,
+ check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
+def loopback_devices():
+ '''
+ Parse through 'losetup -a' output to determine currently mapped
+ loopback devices. Output is expected to look like:
+
+ /dev/loop0: [0807]:961814 (/tmp/my.img)
+
+ :returns: dict: a dict mapping {loopback_dev: backing_file}
+ '''
+ loopbacks = {}
+ cmd = ['losetup', '-a']
+ devs = [d.strip().split(' ') for d in
+ check_output(cmd).splitlines() if d != '']
+ for dev, _, f in devs:
+ loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
+ return loopbacks
+
+
+def create_loopback(file_path):
+ '''
+ Create a loopback device for a given backing file.
+
+ :returns: str: Full path to new loopback device (eg, /dev/loop0)
+ '''
+ file_path = os.path.abspath(file_path)
+ check_call(['losetup', '--find', file_path])
+ for d, f in six.iteritems(loopback_devices()):
+ if f == file_path:
+ return d
+
+
+def ensure_loopback_device(path, size):
+ '''
+ Ensure a loopback device exists for a given backing file path and size.
+ If a loopback device is not mapped to the file, a new one will be created.
+
+ TODO: Confirm size of found loopback device.
+
+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+ '''
+ for d, f in six.iteritems(loopback_devices()):
+ if f == path:
+ return d
+
+ if not os.path.exists(path):
+ cmd = ['truncate', '--size', size, path]
+ check_call(cmd)
+
+ return create_loopback(path)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 0000000..34b5f71
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,105 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import (
+ CalledProcessError,
+ check_call,
+ check_output,
+ Popen,
+ PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+ '''
+ Deactivate any volume group associated with an LVM physical volume.
+
+ :param block_device: str: Full path to LVM physical volume
+ '''
+ vg = list_lvm_volume_group(block_device)
+ if vg:
+ cmd = ['vgchange', '-an', vg]
+ check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+ '''
+ Determine whether a block device is initialized as an LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: boolean: True if block device is a PV, False if not.
+ '''
+ try:
+ check_output(['pvdisplay', block_device])
+ return True
+ except CalledProcessError:
+ return False
+
+
+def remove_lvm_physical_volume(block_device):
+ '''
+ Remove LVM PV signatures from a given block device.
+
+ :param block_device: str: Full path of block device to scrub.
+ '''
+ p = Popen(['pvremove', '-ff', block_device],
+ stdin=PIPE)
+ p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+ '''
+ List LVM volume group associated with a given block device.
+
+ Assumes block device is a valid LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: str: Name of volume group associated with block device or None
+ '''
+ vg = None
+ pvd = check_output(['pvdisplay', block_device]).splitlines()
+ for l in pvd:
+ l = l.decode('UTF-8')
+ if l.strip().startswith('VG Name'):
+ vg = ' '.join(l.strip().split()[2:])
+ return vg
+
+
+def create_lvm_physical_volume(block_device):
+ '''
+ Initialize a block device as an LVM physical volume.
+
+ :param block_device: str: Full path of block device to initialize.
+
+ '''
+ check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+ '''
+ Create an LVM volume group backed by a given block device.
+
+ Assumes block device has already been initialized as an LVM PV.
+
+ :param volume_group: str: Name of volume group to create.
+ :block_device: str: Full path of PV-initialized block device.
+ '''
+ check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py
new file mode 100644
index 0000000..1e57941
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -0,0 +1,71 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from stat import S_ISBLK
+
+from subprocess import (
+ check_call,
+ check_output,
+ call
+)
+
+
+def is_block_device(path):
+ '''
+ Confirm device at path is a valid block device node.
+
+ :returns: boolean: True if path is a block device, False if not.
+ '''
+ if not os.path.exists(path):
+ return False
+ return S_ISBLK(os.stat(path).st_mode)
+
+
+def zap_disk(block_device):
+ '''
+ Clear a block device of partition table. Relies on sgdisk, which is
+ installed as part of the 'gdisk' package in Ubuntu.
+
+ :param block_device: str: Full path of block device to clean.
+ '''
+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
+ # sometimes sgdisk exits non-zero; this is OK, dd will clean up
+ call(['sgdisk', '--zap-all', '--', block_device])
+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
+ dev_end = check_output(['blockdev', '--getsz',
+ block_device]).decode('UTF-8')
+ gpt_end = int(dev_end.split()[0]) - 100
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+ 'bs=1M', 'count=1'])
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+ 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+
+def is_device_mounted(device):
+ '''Given a device path, return True if that device is mounted, and False
+ if it isn't.
+
+ :param device: str: Full path of the device to check.
+ :returns: boolean: True if the path represents a mounted device, False if
+ it doesn't.
+ '''
+ is_partition = bool(re.search(r".*[0-9]+\b", device))
+ out = check_output(['mount']).decode('UTF-8')
+ if is_partition:
+ return bool(re.search(device + r"\b", out))
+ return bool(re.search(device + r"[0-9]*\b", out))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py
new file mode 100644
index 0000000..d1400a0
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py
new file mode 100644
index 0000000..bb05620
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py
@@ -0,0 +1,57 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+# Edward Hope-Morley <opentastic@gmail.com>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+ """If the decorated function raises exception exc_type, allow num_retries
+ retry attempts before raising the exception.
+ """
+ def _retry_on_exception_inner_1(f):
+ def _retry_on_exception_inner_2(*args, **kwargs):
+ retries = num_retries
+ multiplier = 1
+ while True:
+ try:
+ return f(*args, **kwargs)
+ except exc_type:
+ if not retries:
+ raise
+
+ delay = base_delay * multiplier
+ multiplier += 1
+ log("Retrying '%s' %d more times (delay=%s)" %
+ (f.__name__, retries, delay), level=INFO)
+ retries -= 1
+ if delay:
+ time.sleep(delay)
+
+ return _retry_on_exception_inner_2
+
+ return _retry_on_exception_inner_1
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py
new file mode 100644
index 0000000..0f12d32
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+ """
+ Search and replaces the given pattern on filename.
+
+ :param filename: relative or absolute file path.
+ :param before: expression to be replaced (see 'man sed')
+ :param after: expression to replace with (see 'man sed')
+ :param flags: sed-compatible regex flags in example, to make
+ the search and replace case insensitive, specify ``flags="i"``.
+ The ``g`` flag is always specified regardless, so you do not
+ need to remember to include it when overriding this parameter.
+ :returns: If the sed command exit code was zero then return,
+ otherwise raise CalledProcessError.
+ """
+ expression = r's/{0}/{1}/{2}'.format(before,
+ after, flags)
+
+ return subprocess.check_call(["sed", "-i", "-r", "-e",
+ expression,
+ os.path.expanduser(filename)])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..3056fba
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+class Fstab(io.FileIO):
+ """This class extends file in order to implement a file reader/writer
+ for file `/etc/fstab`
+ """
+
+ class Entry(object):
+ """Entry class represents a non-comment line on the `/etc/fstab` file
+ """
+ def __init__(self, device, mountpoint, filesystem,
+ options, d=0, p=0):
+ self.device = device
+ self.mountpoint = mountpoint
+ self.filesystem = filesystem
+
+ if not options:
+ options = "defaults"
+
+ self.options = options
+ self.d = int(d)
+ self.p = int(p)
+
+ def __eq__(self, o):
+ return str(self) == str(o)
+
+ def __str__(self):
+ return "{} {} {} {} {} {}".format(self.device,
+ self.mountpoint,
+ self.filesystem,
+ self.options,
+ self.d,
+ self.p)
+
+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+ def __init__(self, path=None):
+ if path:
+ self._path = path
+ else:
+ self._path = self.DEFAULT_PATH
+ super(Fstab, self).__init__(self._path, 'rb+')
+
+ def _hydrate_entry(self, line):
+ # NOTE: use split with no arguments to split on any
+ # whitespace including tabs
+ return Fstab.Entry(*filter(
+ lambda x: x not in ('', None),
+ line.strip("\n").split()))
+
+ @property
+ def entries(self):
+ self.seek(0)
+ for line in self.readlines():
+ line = line.decode('us-ascii')
+ try:
+ if line.strip() and not line.strip().startswith("#"):
+ yield self._hydrate_entry(line)
+ except ValueError:
+ pass
+
+ def get_entry_by_attr(self, attr, value):
+ for entry in self.entries:
+ e_attr = getattr(entry, attr)
+ if e_attr == value:
+ return entry
+ return None
+
+ def add_entry(self, entry):
+ if self.get_entry_by_attr('device', entry.device):
+ return False
+
+ self.write((str(entry) + '\n').encode('us-ascii'))
+ self.truncate()
+ return entry
+
+ def remove_entry(self, entry):
+ self.seek(0)
+
+ lines = [l.decode('us-ascii') for l in self.readlines()]
+
+ found = False
+ for index, line in enumerate(lines):
+ if line.strip() and not line.strip().startswith("#"):
+ if self._hydrate_entry(line) == entry:
+ found = True
+ break
+
+ if not found:
+ return False
+
+ lines.remove(line)
+
+ self.seek(0)
+ self.write(''.join(lines).encode('us-ascii'))
+ self.truncate()
+ return True
+
+ @classmethod
+ def remove_by_mountpoint(cls, mountpoint, path=None):
+ fstab = cls(path=path)
+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+ if entry:
+ return fstab.remove_entry(entry)
+ return False
+
+ @classmethod
+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
+ return cls(path=path).add_entry(Fstab.Entry(device,
+ mountpoint, filesystem,
+ options=options))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py
new file mode 100644
index 0000000..ab53a78
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py
@@ -0,0 +1,898 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"Interactions with the Juju environment"
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+# Charm Helpers Developers <juju@lists.ubuntu.com>
+
+from __future__ import print_function
+import copy
+from distutils.version import LooseVersion
+from functools import wraps
+import glob
+import os
+import json
+import yaml
+import subprocess
+import sys
+import errno
+import tempfile
+from subprocess import CalledProcessError
+
+import six
+if not six.PY3:
+ from UserDict import UserDict
+else:
+ from collections import UserDict
+
+CRITICAL = "CRITICAL"
+ERROR = "ERROR"
+WARNING = "WARNING"
+INFO = "INFO"
+DEBUG = "DEBUG"
+MARKER = object()
+
+cache = {}
+
+
+def cached(func):
+ """Cache return values for multiple executions of func + args
+
+ For example::
+
+ @cached
+ def unit_get(attribute):
+ pass
+
+ unit_get('test')
+
+ will cache the result of unit_get + 'test' for future calls.
+ """
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ global cache
+ key = str((func, args, kwargs))
+ try:
+ return cache[key]
+ except KeyError:
+ pass # Drop out of the exception handler scope.
+ res = func(*args, **kwargs)
+ cache[key] = res
+ return res
+ wrapper._wrapped = func
+ return wrapper
+
+
+def flush(key):
+ """Flushes any entries from function cache where the
+ key is found in the function+args """
+ flush_list = []
+ for item in cache:
+ if key in item:
+ flush_list.append(item)
+ for item in flush_list:
+ del cache[item]
+
+
+def log(message, level=None):
+ """Write a message to the juju log"""
+ command = ['juju-log']
+ if level:
+ command += ['-l', level]
+ if not isinstance(message, six.string_types):
+ message = repr(message)
+ command += [message]
+ # Missing juju-log should not cause failures in unit tests
+ # Send log output to stderr
+ try:
+ subprocess.call(command)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ if level:
+ message = "{}: {}".format(level, message)
+ message = "juju-log: {}".format(message)
+ print(message, file=sys.stderr)
+ else:
+ raise
+
+
+class Serializable(UserDict):
+ """Wrapper, an object that can be serialized to yaml or json"""
+
+ def __init__(self, obj):
+ # wrap the object
+ UserDict.__init__(self)
+ self.data = obj
+
+ def __getattr__(self, attr):
+ # See if this object has attribute.
+ if attr in ("json", "yaml", "data"):
+ return self.__dict__[attr]
+ # Check for attribute in wrapped object.
+ got = getattr(self.data, attr, MARKER)
+ if got is not MARKER:
+ return got
+ # Proxy to the wrapped object via dict interface.
+ try:
+ return self.data[attr]
+ except KeyError:
+ raise AttributeError(attr)
+
+ def __getstate__(self):
+ # Pickle as a standard dictionary.
+ return self.data
+
+ def __setstate__(self, state):
+ # Unpickle into our wrapper.
+ self.data = state
+
+ def json(self):
+ """Serialize the object to json"""
+ return json.dumps(self.data)
+
+ def yaml(self):
+ """Serialize the object to yaml"""
+ return yaml.dump(self.data)
+
+
+def execution_environment():
+ """A convenient bundling of the current execution context"""
+ context = {}
+ context['conf'] = config()
+ if relation_id():
+ context['reltype'] = relation_type()
+ context['relid'] = relation_id()
+ context['rel'] = relation_get()
+ context['unit'] = local_unit()
+ context['rels'] = relations()
+ context['env'] = os.environ
+ return context
+
+
+def in_relation_hook():
+ """Determine whether we're running in a relation hook"""
+ return 'JUJU_RELATION' in os.environ
+
+
+def relation_type():
+ """The scope for the current relation hook"""
+ return os.environ.get('JUJU_RELATION', None)
+
+
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+ """The relation ID for the current or a specified relation"""
+ if not relation_name and not service_or_unit:
+ return os.environ.get('JUJU_RELATION_ID', None)
+ elif relation_name and service_or_unit:
+ service_name = service_or_unit.split('/')[0]
+ for relid in relation_ids(relation_name):
+ remote_service = remote_service_name(relid)
+ if remote_service == service_name:
+ return relid
+ else:
+ raise ValueError('Must specify neither or both of relation_name and service_or_unit')
+
+
+def local_unit():
+ """Local unit ID"""
+ return os.environ['JUJU_UNIT_NAME']
+
+
+def remote_unit():
+ """The remote unit for the current relation hook"""
+ return os.environ.get('JUJU_REMOTE_UNIT', None)
+
+
+def service_name():
+ """The name service group this unit belongs to"""
+ return local_unit().split('/')[0]
+
+
+@cached
+def remote_service_name(relid=None):
+ """The remote service name for a given relation-id (or the current relation)"""
+ if relid is None:
+ unit = remote_unit()
+ else:
+ units = related_units(relid)
+ unit = units[0] if units else None
+ return unit.split('/')[0] if unit else None
+
+
+def hook_name():
+ """The name of the currently executing hook"""
+ return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
+
+
+class Config(dict):
+ """A dictionary representation of the charm's config.yaml, with some
+ extra features:
+
+ - See which values in the dictionary have changed since the previous hook.
+ - For values that have changed, see what the previous value was.
+ - Store arbitrary data for use in a later hook.
+
+ NOTE: Do not instantiate this object directly - instead call
+ ``hookenv.config()``, which will return an instance of :class:`Config`.
+
+ Example usage::
+
+ >>> # inside a hook
+ >>> from charmhelpers.core import hookenv
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'bar'
+ >>> # store a new key/value for later use
+ >>> config['mykey'] = 'myval'
+
+
+ >>> # user runs `juju set mycharm foo=baz`
+ >>> # now we're inside subsequent config-changed hook
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'baz'
+ >>> # test to see if this val has changed since last hook
+ >>> config.changed('foo')
+ True
+ >>> # what was the previous value?
+ >>> config.previous('foo')
+ 'bar'
+ >>> # keys/values that we add are preserved across hooks
+ >>> config['mykey']
+ 'myval'
+
+ """
+ CONFIG_FILE_NAME = '.juju-persistent-config'
+
+ def __init__(self, *args, **kw):
+ super(Config, self).__init__(*args, **kw)
+ self.implicit_save = True
+ self._prev_dict = None
+ self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+ if os.path.exists(self.path):
+ self.load_previous()
+ atexit(self._implicit_save)
+
+ def load_previous(self, path=None):
+ """Load previous copy of config from disk.
+
+ In normal usage you don't need to call this method directly - it
+ is called automatically at object initialization.
+
+ :param path:
+
+ File path from which to load the previous config. If `None`,
+ config is loaded from the default location. If `path` is
+ specified, subsequent `save()` calls will write to the same
+ path.
+
+ """
+ self.path = path or self.path
+ with open(self.path) as f:
+ self._prev_dict = json.load(f)
+ for k, v in copy.deepcopy(self._prev_dict).items():
+ if k not in self:
+ self[k] = v
+
+ def changed(self, key):
+ """Return True if the current value for this key is different from
+ the previous value.
+
+ """
+ if self._prev_dict is None:
+ return True
+ return self.previous(key) != self.get(key)
+
+ def previous(self, key):
+ """Return previous value for this key, or None if there
+ is no previous value.
+
+ """
+ if self._prev_dict:
+ return self._prev_dict.get(key)
+ return None
+
+ def save(self):
+ """Save this config to disk.
+
+ If the charm is using the :mod:`Services Framework <services.base>`
+ or :meth:'@hook <Hooks.hook>' decorator, this
+ is called automatically at the end of successful hook execution.
+ Otherwise, it should be called directly by user code.
+
+ To disable automatic saves, set ``implicit_save=False`` on this
+ instance.
+
+ """
+ with open(self.path, 'w') as f:
+ json.dump(self, f)
+
+ def _implicit_save(self):
+ if self.implicit_save:
+ self.save()
+
+
+@cached
+def config(scope=None):
+ """Juju charm configuration"""
+ config_cmd_line = ['config-get']
+ if scope is not None:
+ config_cmd_line.append(scope)
+ config_cmd_line.append('--format=json')
+ try:
+ config_data = json.loads(
+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ if scope is not None:
+ return config_data
+ return Config(config_data)
+ except ValueError:
+ return None
+
+
+@cached
+def relation_get(attribute=None, unit=None, rid=None):
+    """Get relation information.
+
+    Returns the value of ``attribute`` (or a dict of all settings when
+    attribute is None) for ``unit`` on relation ``rid``, defaulting to the
+    current relation context.  Returns None when the output is not valid
+    JSON or when the relation/unit does not exist.
+    """
+    _args = ['relation-get', '--format=json']
+    if rid:
+        _args.append('-r')
+        _args.append(rid)
+    # "-" asks relation-get for the full settings map.
+    _args.append(attribute or '-')
+    if unit:
+        _args.append(unit)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except CalledProcessError as e:
+        # Exit status 2 from relation-get means the relation (or unit) is
+        # gone; treat that as "no data" rather than an error.
+        if e.returncode == 2:
+            return None
+        raise
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+    """Set relation information for the current unit.
+
+    Settings may be passed as a dict (``relation_settings``) and/or as
+    keyword arguments; keyword arguments win on key collisions.  A value
+    of None writes an empty setting, which juju treats as a deletion.
+    """
+    relation_settings = relation_settings if relation_settings else {}
+    relation_cmd_line = ['relation-set']
+    # Probe the installed juju for --file support (added in 1.23.2).
+    accepts_file = "--file" in subprocess.check_output(
+        relation_cmd_line + ["--help"], universal_newlines=True)
+    if relation_id is not None:
+        relation_cmd_line.extend(('-r', relation_id))
+    settings = relation_settings.copy()
+    settings.update(kwargs)
+    for key, value in settings.items():
+        # Force value to be a string: it always should, but some call
+        # sites pass in things like dicts or numbers.
+        if value is not None:
+            settings[key] = "{}".format(value)
+    if accepts_file:
+        # --file was introduced in Juju 1.23.2. Use it by default if
+        # available, since otherwise we'll break if the relation data is
+        # too big. Ideally we should tell relation-set to read the data from
+        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
+        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+        subprocess.check_call(
+            relation_cmd_line + ["--file", settings_file.name])
+        os.remove(settings_file.name)
+    else:
+        # Fallback: pass each setting on the command line.
+        for key, value in settings.items():
+            if value is None:
+                relation_cmd_line.append('{}='.format(key))
+            else:
+                relation_cmd_line.append('{}={}'.format(key, value))
+        subprocess.check_call(relation_cmd_line)
+    # Flush cache of any relation-gets for local unit
+    flush(local_unit())
+
+
+def relation_clear(r_id=None):
+    ''' Clears any relation data already set on relation r_id.
+
+    The juju-managed address settings are preserved; every other setting
+    is overwritten with None, which relation_set turns into a deletion.
+    '''
+    settings = relation_get(rid=r_id,
+                            unit=local_unit())
+    for setting in settings:
+        if setting not in ['public-address', 'private-address']:
+            settings[setting] = None
+    relation_set(relation_id=r_id,
+                 **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+ """A list of relation_ids"""
+ reltype = reltype or relation_type()
+ relid_cmd_line = ['relation-ids', '--format=json']
+ if reltype is not None:
+ relid_cmd_line.append(reltype)
+ return json.loads(
+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+ return []
+
+
+@cached
+def related_units(relid=None):
+ """A list of related units"""
+ relid = relid or relation_id()
+ units_cmd_line = ['relation-list', '--format=json']
+ if relid is not None:
+ units_cmd_line.extend(('-r', relid))
+ return json.loads(
+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+ """Get the json represenation of a unit's relation"""
+ unit = unit or remote_unit()
+ relation = relation_get(unit=unit, rid=rid)
+ for key in relation:
+ if key.endswith('-list'):
+ relation[key] = relation[key].split()
+ relation['__unit__'] = unit
+ return relation
+
+
+@cached
+def relations_for_id(relid=None):
+ """Get relations of a specific relation ID"""
+ relation_data = []
+ relid = relid or relation_ids()
+ for unit in related_units(relid):
+ unit_data = relation_for_unit(unit, relid)
+ unit_data['__relid__'] = relid
+ relation_data.append(unit_data)
+ return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+ """Get relations of a specific type"""
+ relation_data = []
+ reltype = reltype or relation_type()
+ for relid in relation_ids(reltype):
+ for relation in relations_for_id(relid):
+ relation['__relid__'] = relid
+ relation_data.append(relation)
+ return relation_data
+
+
+@cached
+def metadata():
+ """Get the current charm metadata.yaml contents as a python object"""
+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+ return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+ """Get a list of relation types supported by this charm"""
+ rel_types = []
+ md = metadata()
+ for key in ('provides', 'requires', 'peers'):
+ section = md.get(key)
+ if section:
+ rel_types.extend(section.keys())
+ return rel_types
+
+
+@cached
+def relation_to_interface(relation_name):
+ """
+ Given the name of a relation, return the interface that relation uses.
+
+ :returns: The interface name, or ``None``.
+ """
+ return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+ """
+ Given the name of a relation, return the role and the name of the interface
+ that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
+
+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
+ """
+ _metadata = metadata()
+ for role in ('provides', 'requires', 'peer'):
+ interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+ if interface:
+ return role, interface
+ return None, None
+
+
+@cached
+def role_and_interface_to_relations(role, interface_name):
+ """
+ Given a role and interface name, return a list of relation names for the
+ current charm that use that interface under that role (where role is one
+ of ``provides``, ``requires``, or ``peer``).
+
+ :returns: A list of relation names.
+ """
+ _metadata = metadata()
+ results = []
+ for relation_name, relation in _metadata.get(role, {}).items():
+ if relation['interface'] == interface_name:
+ results.append(relation_name)
+ return results
+
+
+@cached
+def interface_to_relations(interface_name):
+ """
+ Given an interface, return a list of relation names for the current
+ charm that use that interface.
+
+ :returns: A list of relation names.
+ """
+ results = []
+ for role in ('provides', 'requires', 'peer'):
+ results.extend(role_and_interface_to_relations(role, interface_name))
+ return results
+
+
+@cached
+def charm_name():
+ """Get the name of the current charm as is specified on metadata.yaml"""
+ return metadata().get('name')
+
+
+@cached
+def relations():
+    """Get a nested dictionary of relation data for all related units.
+
+    Shape: {relation_type: {relation_id: {unit_name: settings_dict}}}.
+    The local unit's own settings are included alongside each remote
+    unit's settings for every relation id.
+    """
+    rels = {}
+    for reltype in relation_types():
+        relids = {}
+        for relid in relation_ids(reltype):
+            # Seed with the local unit's view of this relation.
+            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
+            for unit in related_units(relid):
+                reldata = relation_get(unit=unit, rid=relid)
+                units[unit] = reldata
+            relids[relid] = units
+        rels[reltype] = relids
+    return rels
+
+
+@cached
+def is_relation_made(relation, keys='private-address'):
+ '''
+ Determine whether a relation is established by checking for
+ presence of key(s). If a list of keys is provided, they
+ must all be present for the relation to be identified as made
+ '''
+ if isinstance(keys, str):
+ keys = [keys]
+ for r_id in relation_ids(relation):
+ for unit in related_units(r_id):
+ context = {}
+ for k in keys:
+ context[k] = relation_get(k, rid=r_id,
+ unit=unit)
+ if None not in context.values():
+ return True
+ return False
+
+
+def open_port(port, protocol="TCP"):
+ """Open a service network port"""
+ _args = ['open-port']
+ _args.append('{}/{}'.format(port, protocol))
+ subprocess.check_call(_args)
+
+
+def close_port(port, protocol="TCP"):
+ """Close a service network port"""
+ _args = ['close-port']
+ _args.append('{}/{}'.format(port, protocol))
+ subprocess.check_call(_args)
+
+
+@cached
+def unit_get(attribute):
+ """Get the unit ID for the remote unit"""
+ _args = ['unit-get', '--format=json', attribute]
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
+def unit_public_ip():
+ """Get this unit's public IP address"""
+ return unit_get('public-address')
+
+
+def unit_private_ip():
+ """Get this unit's private IP address"""
+ return unit_get('private-address')
+
+
+class UnregisteredHookError(Exception):
+ """Raised when an undefined hook is called"""
+ pass
+
+
+class Hooks(object):
+    """A convenient handler for hook functions.
+
+    Example::
+
+        hooks = Hooks()
+
+        # register a hook, taking its name from the function name
+        @hooks.hook()
+        def install():
+            pass # your code here
+
+        # register a hook, providing a custom hook name
+        @hooks.hook("config-changed")
+        def config_changed():
+            pass # your code here
+
+        if __name__ == "__main__":
+            # execute a hook based on the name the program is called by
+            hooks.execute(sys.argv)
+    """
+
+    def __init__(self, config_save=None):
+        super(Hooks, self).__init__()
+        # Maps hook name -> callable.
+        self._hooks = {}
+
+        # For unknown reasons, we allow the Hooks constructor to override
+        # config().implicit_save.
+        if config_save is not None:
+            config().implicit_save = config_save
+
+    def register(self, name, function):
+        """Register a hook"""
+        self._hooks[name] = function
+
+    def execute(self, args):
+        """Execute a registered hook based on args[0].
+
+        Runs atstart callbacks first; atexit callbacks run only when the
+        hook completes successfully (or exits with code 0).  Raises
+        UnregisteredHookError for unknown hook names.
+        """
+        _run_atstart()
+        hook_name = os.path.basename(args[0])
+        if hook_name in self._hooks:
+            try:
+                self._hooks[hook_name]()
+            except SystemExit as x:
+                # sys.exit() / sys.exit(0) still counts as success.
+                if x.code is None or x.code == 0:
+                    _run_atexit()
+                raise
+            _run_atexit()
+        else:
+            raise UnregisteredHookError(hook_name)
+
+    def hook(self, *hook_names):
+        """Decorator, registering them as hooks"""
+        def wrapper(decorated):
+            for hook_name in hook_names:
+                self.register(hook_name, decorated)
+            # NOTE: this is a for/else — the else clause runs whenever the
+            # loop finishes without break (i.e. always here), so the
+            # function's own name, and its dash-separated variant, are
+            # registered in addition to any explicit hook_names.
+            else:
+                self.register(decorated.__name__, decorated)
+                if '_' in decorated.__name__:
+                    self.register(
+                        decorated.__name__.replace('_', '-'), decorated)
+            return decorated
+        return wrapper
+
+
+def charm_dir():
+ """Return the root directory of the current charm"""
+ return os.environ.get('CHARM_DIR')
+
+
+@cached
+def action_get(key=None):
+ """Gets the value of an action parameter, or all key/value param pairs"""
+ cmd = ['action-get']
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return action_data
+
+
+def action_set(values):
+ """Sets the values to be returned after the action finishes"""
+ cmd = ['action-set']
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+def action_fail(message):
+ """Sets the action status to failed and sets the error message.
+
+ The results set by action_set are preserved."""
+ subprocess.check_call(['action-fail', message])
+
+
+def action_name():
+ """Get the name of the currently executing action."""
+ return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+ """Get the UUID of the currently executing action."""
+ return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+ """Get the tag for the currently executing action."""
+ return os.environ.get('JUJU_ACTION_TAG')
+
+
+def status_set(workload_state, message):
+    """Set the workload state with a message
+
+    Use status-set to set the workload state with a message which is visible
+    to the user via juju status. If the status-set command is not found then
+    assume this is juju < 1.23 and juju-log the message instead.
+
+    workload_state -- valid juju workload state.
+    message -- status update message
+    """
+    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
+    if workload_state not in valid_states:
+        raise ValueError(
+            '{!r} is not a valid workload state'.format(workload_state)
+        )
+    cmd = ['status-set', workload_state, message]
+    try:
+        ret = subprocess.call(cmd)
+        if ret == 0:
+            return
+    except OSError as e:
+        # ENOENT: status-set binary missing, i.e. juju < 1.23 — fall
+        # through to the juju-log fallback below.
+        if e.errno != errno.ENOENT:
+            raise
+    log_message = 'status-set failed: {} {}'.format(workload_state,
+                                                    message)
+    log(log_message, level='INFO')
+
+
+def status_get():
+ """Retrieve the previously set juju workload state and message
+
+ If the status-get command is not found then assume this is juju < 1.23 and
+ return 'unknown', ""
+
+ """
+ cmd = ['status-get', "--format=json", "--include-data"]
+ try:
+ raw_status = subprocess.check_output(cmd)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return ('unknown', "")
+ else:
+ raise
+ else:
+ status = json.loads(raw_status.decode("UTF-8"))
+ return (status["status"], status["message"])
+
+
+def translate_exc(from_exc, to_exc):
+ def inner_translate_exc1(f):
+ def inner_translate_exc2(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except from_exc:
+ raise to_exc
+
+ return inner_translate_exc2
+
+ return inner_translate_exc1
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def is_leader():
+ """Does the current unit hold the juju leadership
+
+ Uses juju to determine whether the current unit is the leader of its peers
+ """
+ cmd = ['is-leader', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_get(attribute=None):
+ """Juju leader get value(s)"""
+ cmd = ['leader-get', '--format=json'] + [attribute or '-']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def leader_set(settings=None, **kwargs):
+ """Juju leader set value(s)"""
+ # Don't log secrets.
+ # log("Juju leader-set '%s'" % (settings), level=DEBUG)
+ cmd = ['leader-set']
+ settings = settings or {}
+ settings.update(kwargs)
+ for k, v in settings.items():
+ if v is None:
+ cmd.append('{}='.format(k))
+ else:
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+@cached
+def juju_version():
+ """Full version string (eg. '1.23.3.1-trusty-amd64')"""
+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+ return subprocess.check_output([jujud, 'version'],
+ universal_newlines=True).strip()
+
+
+@cached
+def has_juju_version(minimum_version):
+ """Return True if the Juju version is at least the provided version"""
+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+ '''Schedule a callback to run before the main hook.
+
+ Callbacks are run in the order they were added.
+
+ This is useful for modules and classes to perform initialization
+ and inject behavior. In particular:
+
+ - Run common code before all of your hooks, such as logging
+ the hook name or interesting relation data.
+ - Defer object or module initialization that requires a hook
+ context until we know there actually is a hook context,
+ making testing easier.
+ - Rather than requiring charm authors to include boilerplate to
+ invoke your helper's behavior, have it run automatically if
+ your object is instantiated or module imported.
+
+ This is not at all useful after your hook framework as been launched.
+ '''
+ global _atstart
+ _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+ '''Schedule a callback to run on successful hook completion.
+
+ Callbacks are run in the reverse order that they were added.'''
+ _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+ '''Hook frameworks must invoke this before running the main hook body.'''
+ global _atstart
+ for callback, args, kwargs in _atstart:
+ callback(*args, **kwargs)
+ del _atstart[:]
+
+
+def _run_atexit():
+ '''Hook frameworks must invoke this after the main hook body has
+ successfully completed. Do not invoke it if the hook fails.'''
+ global _atexit
+ for callback, args, kwargs in reversed(_atexit):
+ callback(*args, **kwargs)
+ del _atexit[:]
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py
new file mode 100644
index 0000000..cb3c527
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py
@@ -0,0 +1,586 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Nick Moffitt <nick.moffitt@canonical.com>
+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+from contextlib import contextmanager
+from collections import OrderedDict
+
+import six
+
+from .hookenv import log
+from .fstab import Fstab
+
+
+def service_start(service_name):
+ """Start a system service"""
+ return service('start', service_name)
+
+
+def service_stop(service_name):
+ """Stop a system service"""
+ return service('stop', service_name)
+
+
+def service_restart(service_name):
+ """Restart a system service"""
+ return service('restart', service_name)
+
+
+def service_reload(service_name, restart_on_failure=False):
+ """Reload a system service, optionally falling back to restart if
+ reload fails"""
+ service_result = service('reload', service_name)
+ if not service_result and restart_on_failure:
+ service_result = service('restart', service_name)
+ return service_result
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
+ """Pause a system service.
+
+ Stop it, and prevent it from starting again at boot."""
+ stopped = service_stop(service_name)
+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+ sysv_file = os.path.join(initd_dir, service_name)
+ if os.path.exists(upstart_file):
+ override_path = os.path.join(
+ init_dir, '{}.override'.format(service_name))
+ with open(override_path, 'w') as fh:
+ fh.write("manual\n")
+ elif os.path.exists(sysv_file):
+ subprocess.check_call(["update-rc.d", service_name, "disable"])
+ else:
+ # XXX: Support SystemD too
+ raise ValueError(
+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
+ service_name, upstart_file, sysv_file))
+ return stopped
+
+
+def service_resume(service_name, init_dir="/etc/init",
+ initd_dir="/etc/init.d"):
+ """Resume a system service.
+
+ Reenable starting again at boot. Start the service"""
+ upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+ sysv_file = os.path.join(initd_dir, service_name)
+ if os.path.exists(upstart_file):
+ override_path = os.path.join(
+ init_dir, '{}.override'.format(service_name))
+ if os.path.exists(override_path):
+ os.unlink(override_path)
+ elif os.path.exists(sysv_file):
+ subprocess.check_call(["update-rc.d", service_name, "enable"])
+ else:
+ # XXX: Support SystemD too
+ raise ValueError(
+ "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
+ service_name, upstart_file, sysv_file))
+
+ started = service_start(service_name)
+ return started
+
+
+def service(action, service_name):
+ """Control a system service"""
+ cmd = ['service', service_name, action]
+ return subprocess.call(cmd) == 0
+
+
+def service_running(service):
+ """Determine whether a system service is running"""
+ try:
+ output = subprocess.check_output(
+ ['service', service, 'status'],
+ stderr=subprocess.STDOUT).decode('UTF-8')
+ except subprocess.CalledProcessError:
+ return False
+ else:
+ if ("start/running" in output or "is running" in output):
+ return True
+ else:
+ return False
+
+
+def service_available(service_name):
+ """Determine whether a system service is available"""
+ try:
+ subprocess.check_output(
+ ['service', service_name, 'status'],
+ stderr=subprocess.STDOUT).decode('UTF-8')
+ except subprocess.CalledProcessError as e:
+ return b'unrecognized service' not in e.output
+ else:
+ return True
+
+
+def adduser(username, password=None, shell='/bin/bash', system_user=False):
+ """Add a user to the system"""
+ try:
+ user_info = pwd.getpwnam(username)
+ log('user {0} already exists!'.format(username))
+ except KeyError:
+ log('creating user {0}'.format(username))
+ cmd = ['useradd']
+ if system_user or password is None:
+ cmd.append('--system')
+ else:
+ cmd.extend([
+ '--create-home',
+ '--shell', shell,
+ '--password', password,
+ ])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+ user_info = pwd.getpwnam(username)
+ return user_info
+
+
+def user_exists(username):
+ """Check if a user exists"""
+ try:
+ pwd.getpwnam(username)
+ user_exists = True
+ except KeyError:
+ user_exists = False
+ return user_exists
+
+
+def add_group(group_name, system_group=False):
+ """Add a group to the system"""
+ try:
+ group_info = grp.getgrnam(group_name)
+ log('group {0} already exists!'.format(group_name))
+ except KeyError:
+ log('creating group {0}'.format(group_name))
+ cmd = ['addgroup']
+ if system_group:
+ cmd.append('--system')
+ else:
+ cmd.extend([
+ '--group',
+ ])
+ cmd.append(group_name)
+ subprocess.check_call(cmd)
+ group_info = grp.getgrnam(group_name)
+ return group_info
+
+
+def add_user_to_group(username, group):
+ """Add a user to a group"""
+ cmd = ['gpasswd', '-a', username, group]
+ log("Adding user {} to group {}".format(username, group))
+ subprocess.check_call(cmd)
+
+
+def rsync(from_path, to_path, flags='-r', options=None):
+ """Replicate the contents of a path"""
+ options = options or ['--delete', '--executability']
+ cmd = ['/usr/bin/rsync', flags]
+ cmd.extend(options)
+ cmd.append(from_path)
+ cmd.append(to_path)
+ log(" ".join(cmd))
+ return subprocess.check_output(cmd).decode('UTF-8').strip()
+
+
+def symlink(source, destination):
+ """Create a symbolic link"""
+ log("Symlinking {} as {}".format(source, destination))
+ cmd = [
+ 'ln',
+ '-sf',
+ source,
+ destination,
+ ]
+ subprocess.check_call(cmd)
+
+
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
+    """Create a directory with the given ownership and permissions.
+
+    With force=True, a non-directory file at the path is removed first.
+    """
+    log("Making dir {} {}:{} {:o}".format(path, owner, group,
+                                          perms))
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    realpath = os.path.abspath(path)
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
+            log("Removing non-directory file {} prior to mkdir()".format(path))
+            os.unlink(realpath)
+            os.makedirs(realpath, perms)
+    elif not path_exists:
+        os.makedirs(realpath, perms)
+    # NOTE(review): chown/chmod apply to the leaf directory only; any
+    # intermediate directories created by makedirs keep umask-masked perms.
+    os.chown(realpath, uid, gid)
+    os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+ """Create or overwrite a file with the contents of a byte string."""
+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ with open(path, 'wb') as target:
+ os.fchown(target.fileno(), uid, gid)
+ os.fchmod(target.fileno(), perms)
+ target.write(content)
+
+
+def fstab_remove(mp):
+ """Remove the given mountpoint entry from /etc/fstab
+ """
+ return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+ """Adds the given device entry to the /etc/fstab file
+ """
+ return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+ """Mount a filesystem at a particular mountpoint"""
+ cmd_args = ['mount']
+ if options is not None:
+ cmd_args.extend(['-o', options])
+ cmd_args.extend([device, mountpoint])
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_add(device, mountpoint, filesystem, options=options)
+ return True
+
+
+def umount(mountpoint, persist=False):
+ """Unmount a filesystem"""
+ cmd_args = ['umount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_remove(mountpoint)
+ return True
+
+
+def mounts():
+ """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+ with open('/proc/mounts') as f:
+ # [['/mount/point','/dev/path'],[...]]
+ system_mounts = [m[1::-1] for m in [l.strip().split()
+ for l in f.readlines()]]
+ return system_mounts
+
+
+def fstab_mount(mountpoint):
+ """Mount filesystem using fstab"""
+ cmd_args = ['mount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ return False
+ return True
+
+
+def file_hash(path, hash_type='md5'):
+ """
+ Generate a hash checksum of the contents of 'path' or None if not found.
+
+ :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
+ such as md5, sha1, sha256, sha512, etc.
+ """
+ if os.path.exists(path):
+ h = getattr(hashlib, hash_type)()
+ with open(path, 'rb') as source:
+ h.update(source.read())
+ return h.hexdigest()
+ else:
+ return None
+
+
+def path_hash(path):
+ """
+ Generate a hash checksum of all files matching 'path'. Standard wildcards
+ like '*' and '?' are supported, see documentation for the 'glob' module for
+ more information.
+
+ :return: dict: A { filename: hash } dictionary for all matched files.
+ Empty if none found.
+ """
+ return {
+ filename: file_hash(filename)
+ for filename in glob.iglob(path)
+ }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+ """
+ Validate a file using a cryptographic checksum.
+
+ :param str checksum: Value of the checksum used to validate the file.
+ :param str hash_type: Hash algorithm used to generate `checksum`.
+ Can be any hash alrgorithm supported by :mod:`hashlib`,
+ such as md5, sha1, sha256, sha512, etc.
+ :raises ChecksumError: If the file fails the checksum
+
+ """
+ actual_checksum = file_hash(path, hash_type)
+ if checksum != actual_checksum:
+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+ pass
+
+
+def restart_on_change(restart_map, stopstart=False):
+ """Restart services based on configuration files changing
+
+ This function is used a decorator, for example::
+
+ @restart_on_change({
+ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
+ '/etc/apache/sites-enabled/*': [ 'apache2' ]
+ })
+ def config_changed():
+ pass # your code here
+
+ In this example, the cinder-api and cinder-volume services
+ would be restarted if /etc/ceph/ceph.conf is changed by the
+ ceph_client_changed function. The apache2 service would be
+ restarted if any file matching the pattern got changed, created
+ or removed. Standard wildcards are supported, see documentation
+ for the 'glob' module for more information.
+ """
+ def wrap(f):
+ def wrapped_f(*args, **kwargs):
+ checksums = {path: path_hash(path) for path in restart_map}
+ f(*args, **kwargs)
+ restarts = []
+ for path in restart_map:
+ if path_hash(path) != checksums[path]:
+ restarts += restart_map[path]
+ services_list = list(OrderedDict.fromkeys(restarts))
+ if not stopstart:
+ for service_name in services_list:
+ service('restart', service_name)
+ else:
+ for action in ['stop', 'start']:
+ for service_name in services_list:
+ service(action, service_name)
+ return wrapped_f
+ return wrap
+
+
+def lsb_release():
+ """Return /etc/lsb-release in a dict"""
+ d = {}
+ with open('/etc/lsb-release', 'r') as lsb:
+ for l in lsb:
+ k, v = l.split('=')
+ d[k.strip()] = v.strip()
+ return d
+
+
+def pwgen(length=None):
+    """Generate a random password.
+
+    :param length: desired length; a random length in [35, 45) when None.
+    """
+    if length is None:
+        # A random length is ok to use a weak PRNG
+        length = random.choice(range(35, 45))
+    # Exclude a fixed set of characters — presumably ambiguous glyphs
+    # (l/1/0/Q/D) and vowels (to avoid accidental words); verify against
+    # upstream charm-helpers before changing.
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return(''.join(random_chars))
+
+
+def is_phy_iface(interface):
+ """Returns True if interface is not virtual, otherwise False."""
+ if interface:
+ sys_net = '/sys/class/net'
+ if os.path.isdir(sys_net):
+ for iface in glob.glob(os.path.join(sys_net, '*')):
+ if '/virtual/' in os.path.realpath(iface):
+ continue
+
+ if interface == os.path.basename(iface):
+ return True
+
+ return False
+
+
+def get_bond_master(interface):
+ """Returns bond master if interface is bond slave otherwise None.
+
+ NOTE: the provided interface is expected to be physical
+ """
+ if interface:
+ iface_path = '/sys/class/net/%s' % (interface)
+ if os.path.exists(iface_path):
+ if '/virtual/' in os.path.realpath(iface_path):
+ return None
+
+ master = os.path.join(iface_path, 'master')
+ if os.path.exists(master):
+ master = os.path.realpath(master)
+ # make sure it is a bond master
+ if os.path.exists(os.path.join(master, 'bonding')):
+ return os.path.basename(master)
+
+ return None
+
+
+def list_nics(nic_type=None):
+ '''Return a list of nics of given type(s)'''
+ if isinstance(nic_type, six.string_types):
+ int_types = [nic_type]
+ else:
+ int_types = nic_type
+
+ interfaces = []
+ if nic_type:
+ for int_type in int_types:
+ cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
+ ip_output = ip_output.split('\n')
+ ip_output = (line for line in ip_output if line)
+ for line in ip_output:
+ if line.split()[1].startswith(int_type):
+ matched = re.search('.*: (' + int_type +
+ r'[0-9]+\.[0-9]+)@.*', line)
+ if matched:
+ iface = matched.groups()[0]
+ else:
+ iface = line.split()[1].replace(":", "")
+
+ if iface not in interfaces:
+ interfaces.append(iface)
+ else:
+ cmd = ['ip', 'a']
+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+ ip_output = (line.strip() for line in ip_output if line)
+
+ key = re.compile('^[0-9]+:\s+(.+):')
+ for line in ip_output:
+ matched = re.search(key, line)
+ if matched:
+ iface = matched.group(1)
+ iface = iface.partition("@")[0]
+ if iface not in interfaces:
+ interfaces.append(iface)
+
+ return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+ '''Set MTU on a network interface'''
+ cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+ subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+ cmd = ['ip', 'addr', 'show', nic]
+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+ mtu = ""
+ for line in ip_output:
+ words = line.split()
+ if 'mtu' in words:
+ mtu = words[words.index("mtu") + 1]
+ return mtu
+
+
+def get_nic_hwaddr(nic):
+ cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
+ hwaddr = ""
+ words = ip_output.split()
+ if 'link/ether' in words:
+ hwaddr = words[words.index('link/ether') + 1]
+ return hwaddr
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+ '''Compare supplied revno with the revno of the installed package
+
+ * 1 => Installed revno is greater than supplied arg
+ * 0 => Installed revno is the same as supplied arg
+ * -1 => Installed revno is less than supplied arg
+
+ This function imports apt_cache function from charmhelpers.fetch if
+ the pkgcache argument is None. Be sure to add charmhelpers.fetch if
+ you call this function, or pass an apt_pkg.Cache() instance.
+ '''
+ import apt_pkg
+ if not pkgcache:
+ from charmhelpers.fetch import apt_cache
+ pkgcache = apt_cache()
+ pkg = pkgcache[package]
+ return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+ cur = os.getcwd()
+ try:
+ yield os.chdir(d)
+ finally:
+ os.chdir(cur)
+
+
+def chownr(path, owner, group, follow_links=True):
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ if follow_links:
+ chown = os.chown
+ else:
+ chown = os.lchown
+
+ for root, dirs, files in os.walk(path):
+ for name in dirs + files:
+ full = os.path.join(root, name)
+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+ if not broken_symlink:
+ chown(full, uid, gid)
+
+
+def lchownr(path, owner, group):
+ chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py
new file mode 100644
index 0000000..4aaca3f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+ add_group,
+ add_user_to_group,
+ fstab_mount,
+ mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
+ max_map_count=65536, mnt_point='/run/hugepages/kvm',
+ pagesize='2MB', mount=True, set_shmmax=False):
+ """Enable hugepages on system.
+
+ Args:
+ user (str) -- Username to allow access to hugepages to
+ group (str) -- Group name to own hugepages
+ nr_hugepages (int) -- Number of pages to reserve
+ max_map_count (int) -- Number of Virtual Memory Areas a process can own
+ mnt_point (str) -- Directory to mount hugepages on
+ pagesize (str) -- Size of hugepages
+ mount (bool) -- Whether to mount hugepages
+ """
+ group_info = add_group(group)
+ gid = group_info.gr_gid
+ add_user_to_group(user, group)
+ sysctl_settings = {
+ 'vm.nr_hugepages': nr_hugepages,
+ 'vm.max_map_count': max_map_count,
+ 'vm.hugetlb_shm_group': gid,
+ }
+ if set_shmmax:
+ shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+ shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+ if shmmax_minsize > shmmax_current:
+ sysctl_settings['kernel.shmmax'] = shmmax_minsize
+ sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+ mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+ lfstab = fstab.Fstab()
+ fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+ if fstab_entry:
+ lfstab.remove_entry(fstab_entry)
+ entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+ 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+ lfstab.add_entry(entry)
+ if mount:
+ fstab_mount(mnt_point)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py
new file mode 100644
index 0000000..5dc6495
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+from charmhelpers.core.hookenv import (
+ log,
+ INFO
+)
+
+from subprocess import check_call, check_output
+import re
+
+
+def modprobe(module, persist=True):
+ """Load a kernel module and configure for auto-load on reboot."""
+ cmd = ['modprobe', module]
+
+ log('Loading kernel module %s' % module, level=INFO)
+
+ check_call(cmd)
+ if persist:
+ with open('/etc/modules', 'r+') as modules:
+ if module not in modules.read():
+ modules.write(module)
+
+
+def rmmod(module, force=False):
+ """Remove a module from the linux kernel"""
+ cmd = ['rmmod']
+ if force:
+ cmd.append('-f')
+ cmd.append(module)
+ log('Removing kernel module %s' % module, level=INFO)
+ return check_call(cmd)
+
+
+def lsmod():
+ """Shows what kernel modules are currently loaded"""
+ return check_output(['lsmod'],
+ universal_newlines=True)
+
+
+def is_module_loaded(module):
+ """Checks if a kernel module is already loaded"""
+ matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+ return len(matches) > 0
+
+
+def update_initramfs(version='all'):
+ """Updates an initramfs image"""
+ return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py
new file mode 100644
index 0000000..0928158
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+from .base import * # NOQA
+from .helpers import * # NOQA
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py
new file mode 100644
index 0000000..a42660c
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py
@@ -0,0 +1,353 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+from inspect import getargspec
+from collections import Iterable, OrderedDict
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+ 'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+ def __init__(self, services=None):
+ """
+ Register a list of services, given their definitions.
+
+ Service definitions are dicts in the following formats (all keys except
+ 'service' are optional)::
+
+ {
+ "service": <service name>,
+ "required_data": <list of required data contexts>,
+ "provided_data": <list of provided data contexts>,
+ "data_ready": <one or more callbacks>,
+ "data_lost": <one or more callbacks>,
+ "start": <one or more callbacks>,
+ "stop": <one or more callbacks>,
+ "ports": <list of ports to manage>,
+ }
+
+ The 'required_data' list should contain dicts of required data (or
+ dependency managers that act like dicts and know how to collect the data).
+ Only when all items in the 'required_data' list are populated are the list
+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+ information.
+
+ The 'provided_data' list should contain relation data providers, most likely
+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+ that will indicate a set of data to set on a given relation.
+
+ The 'data_ready' value should be either a single callback, or a list of
+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+ Each callback will be called with the service name as the only parameter.
+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
+ are fired.
+
+ The 'data_lost' value should be either a single callback, or a list of
+ callbacks, to be called when a 'required_data' item no longer passes
+ `is_ready()`. Each callback will be called with the service name as the
+ only parameter. After all of the 'data_lost' callbacks are called,
+ the 'stop' callbacks are fired.
+
+ The 'start' value should be either a single callback, or a list of
+ callbacks, to be called when starting the service, after the 'data_ready'
+ callbacks are complete. Each callback will be called with the service
+ name as the only parameter. This defaults to
+ `[host.service_start, services.open_ports]`.
+
+ The 'stop' value should be either a single callback, or a list of
+ callbacks, to be called when stopping the service. If the service is
+ being stopped because it no longer has all of its 'required_data', this
+ will be called after all of the 'data_lost' callbacks are complete.
+ Each callback will be called with the service name as the only parameter.
+ This defaults to `[services.close_ports, host.service_stop]`.
+
+ The 'ports' value should be a list of ports to manage. The default
+ 'start' handler will open the ports after the service is started,
+ and the default 'stop' handler will close the ports prior to stopping
+ the service.
+
+
+ Examples:
+
+ The following registers an Upstart service called bingod that depends on
+ a mongodb relation and which runs a custom `db_migrate` function prior to
+ restarting the service, and a Runit service called spadesd::
+
+ manager = services.ServiceManager([
+ {
+ 'service': 'bingod',
+ 'ports': [80, 443],
+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
+ 'data_ready': [
+ services.template(source='bingod.conf'),
+ services.template(source='bingod.ini',
+ target='/etc/bingod.ini',
+ owner='bingo', perms=0400),
+ ],
+ },
+ {
+ 'service': 'spadesd',
+ 'data_ready': services.template(source='spadesd_run.j2',
+ target='/etc/sv/spadesd/run',
+ perms=0555),
+ 'start': runit_start,
+ 'stop': runit_stop,
+ },
+ ])
+ manager.manage()
+ """
+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+ self._ready = None
+ self.services = OrderedDict()
+ for service in services or []:
+ service_name = service['service']
+ self.services[service_name] = service
+
+ def manage(self):
+ """
+ Handle the current hook by doing The Right Thing with the registered services.
+ """
+ hookenv._run_atstart()
+ try:
+ hook_name = hookenv.hook_name()
+ if hook_name == 'stop':
+ self.stop_services()
+ else:
+ self.reconfigure_services()
+ self.provide_data()
+ except SystemExit as x:
+ if x.code is None or x.code == 0:
+ hookenv._run_atexit()
+ hookenv._run_atexit()
+
+ def provide_data(self):
+ """
+ Set the relation data for each provider in the ``provided_data`` list.
+
+ A provider must have a `name` attribute, which indicates which relation
+ to set data on, and a `provide_data()` method, which returns a dict of
+ data to set.
+
+ The `provide_data()` method can optionally accept two parameters:
+
+ * ``remote_service`` The name of the remote service that the data will
+ be provided to. The `provide_data()` method will be called once
+ for each connected service (not unit). This allows the method to
+ tailor its data to the given service.
+ * ``service_ready`` Whether or not the service definition had all of
+ its requirements met, and thus the ``data_ready`` callbacks run.
+
+ Note that the ``provided_data`` methods are now called **after** the
+ ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
+ a chance to generate any data necessary for the providing to the remote
+ services.
+ """
+ for service_name, service in self.services.items():
+ service_ready = self.is_ready(service_name)
+ for provider in service.get('provided_data', []):
+ for relid in hookenv.relation_ids(provider.name):
+ units = hookenv.related_units(relid)
+ if not units:
+ continue
+ remote_service = units[0].split('/')[0]
+ argspec = getargspec(provider.provide_data)
+ if len(argspec.args) > 1:
+ data = provider.provide_data(remote_service, service_ready)
+ else:
+ data = provider.provide_data()
+ if data:
+ hookenv.relation_set(relid, data)
+
+ def reconfigure_services(self, *service_names):
+ """
+ Update all files for one or more registered services, and,
+ if ready, optionally restart them.
+
+ If no service names are given, reconfigures all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ if self.is_ready(service_name):
+ self.fire_event('data_ready', service_name)
+ self.fire_event('start', service_name, default=[
+ service_restart,
+ manage_ports])
+ self.save_ready(service_name)
+ else:
+ if self.was_ready(service_name):
+ self.fire_event('data_lost', service_name)
+ self.fire_event('stop', service_name, default=[
+ manage_ports,
+ service_stop])
+ self.save_lost(service_name)
+
+ def stop_services(self, *service_names):
+ """
+ Stop one or more registered services, by name.
+
+ If no service names are given, stops all registered services.
+ """
+ for service_name in service_names or self.services.keys():
+ self.fire_event('stop', service_name, default=[
+ manage_ports,
+ service_stop])
+
+ def get_service(self, service_name):
+ """
+ Given the name of a registered service, return its service definition.
+ """
+ service = self.services.get(service_name)
+ if not service:
+ raise KeyError('Service not registered: %s' % service_name)
+ return service
+
+ def fire_event(self, event_name, service_name, default=None):
+ """
+ Fire a data_ready, data_lost, start, or stop event on a given service.
+ """
+ service = self.get_service(service_name)
+ callbacks = service.get(event_name, default)
+ if not callbacks:
+ return
+ if not isinstance(callbacks, Iterable):
+ callbacks = [callbacks]
+ for callback in callbacks:
+ if isinstance(callback, ManagerCallback):
+ callback(self, service_name, event_name)
+ else:
+ callback(service_name)
+
+ def is_ready(self, service_name):
+ """
+ Determine if a registered service is ready, by checking its 'required_data'.
+
+ A 'required_data' item can be any mapping type, and is considered ready
+ if `bool(item)` evaluates as True.
+ """
+ service = self.get_service(service_name)
+ reqs = service.get('required_data', [])
+ return all(bool(req) for req in reqs)
+
+ def _load_ready_file(self):
+ if self._ready is not None:
+ return
+ if os.path.exists(self._ready_file):
+ with open(self._ready_file) as fp:
+ self._ready = set(json.load(fp))
+ else:
+ self._ready = set()
+
+ def _save_ready_file(self):
+ if self._ready is None:
+ return
+ with open(self._ready_file, 'w') as fp:
+ json.dump(list(self._ready), fp)
+
+ def save_ready(self, service_name):
+ """
+ Save an indicator that the given service is now data_ready.
+ """
+ self._load_ready_file()
+ self._ready.add(service_name)
+ self._save_ready_file()
+
+ def save_lost(self, service_name):
+ """
+ Save an indicator that the given service is no longer data_ready.
+ """
+ self._load_ready_file()
+ self._ready.discard(service_name)
+ self._save_ready_file()
+
+ def was_ready(self, service_name):
+ """
+ Determine if the given service was previously data_ready.
+ """
+ self._load_ready_file()
+ return service_name in self._ready
+
+
+class ManagerCallback(object):
+ """
+ Special case of a callback that takes the `ServiceManager` instance
+ in addition to the service name.
+
+ Subclasses should implement `__call__` which should accept three parameters:
+
+ * `manager` The `ServiceManager` instance
+ * `service_name` The name of the service it's being triggered for
+ * `event_name` The name of the event that this callback is handling
+ """
+ def __call__(self, manager, service_name, event_name):
+ raise NotImplementedError()
+
+
+class PortManagerCallback(ManagerCallback):
+ """
+ Callback class that will open or close ports, for use as either
+ a start or stop action.
+ """
+ def __call__(self, manager, service_name, event_name):
+ service = manager.get_service(service_name)
+ new_ports = service.get('ports', [])
+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
+ if os.path.exists(port_file):
+ with open(port_file) as fp:
+ old_ports = fp.read().split(',')
+ for old_port in old_ports:
+ if bool(old_port):
+ old_port = int(old_port)
+ if old_port not in new_ports:
+ hookenv.close_port(old_port)
+ with open(port_file, 'w') as fp:
+ fp.write(','.join(str(port) for port in new_ports))
+ for port in new_ports:
+ if event_name == 'start':
+ hookenv.open_port(port)
+ elif event_name == 'stop':
+ hookenv.close_port(port)
+
+
+def service_stop(service_name):
+ """
+ Wrapper around host.service_stop to prevent spurious "unknown service"
+ messages in the logs.
+ """
+ if host.service_running(service_name):
+ host.service_stop(service_name)
+
+
+def service_restart(service_name):
+ """
+ Wrapper around host.service_restart to prevent spurious "unknown service"
+ messages in the logs.
+ """
+ if host.service_available(service_name):
+ if host.service_running(service_name):
+ host.service_restart(service_name)
+ else:
+ host.service_start(service_name)
+
+
+# Convenience aliases
+open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py
new file mode 100644
index 0000000..3f67783
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py
@@ -0,0 +1,283 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import yaml
+
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.core import templating
+
+from charmhelpers.core.services.base import ManagerCallback
+
+
+__all__ = ['RelationContext', 'TemplateCallback',
+ 'render_template', 'template']
+
+
+class RelationContext(dict):
+ """
+ Base class for a context generator that gets relation data from juju.
+
+ Subclasses must provide the attributes `name`, which is the name of the
+ interface of interest, `interface`, which is the type of the interface of
+ interest, and `required_keys`, which is the set of keys required for the
+ relation to be considered complete. The data for all interfaces matching
+ the `name` attribute that are complete will used to populate the dictionary
+ values (see `get_data`, below).
+
+ The generated context will be namespaced under the relation :attr:`name`,
+ to prevent potential naming conflicts.
+
+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
+ """
+ name = None
+ interface = None
+
+ def __init__(self, name=None, additional_required_keys=None):
+ if not hasattr(self, 'required_keys'):
+ self.required_keys = []
+
+ if name is not None:
+ self.name = name
+ if additional_required_keys:
+ self.required_keys.extend(additional_required_keys)
+ self.get_data()
+
+ def __bool__(self):
+ """
+ Returns True if all of the required_keys are available.
+ """
+ return self.is_ready()
+
+ __nonzero__ = __bool__
+
+ def __repr__(self):
+ return super(RelationContext, self).__repr__()
+
+ def is_ready(self):
+ """
+ Returns True if all of the `required_keys` are available from any units.
+ """
+ ready = len(self.get(self.name, [])) > 0
+ if not ready:
+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
+ return ready
+
+ def _is_ready(self, unit_data):
+ """
+ Helper method that tests a set of relation data and returns True if
+ all of the `required_keys` are present.
+ """
+ return set(unit_data.keys()).issuperset(set(self.required_keys))
+
+ def get_data(self):
+ """
+ Retrieve the relation data for each unit involved in a relation and,
+ if complete, store it in a list under `self[self.name]`. This
+ is automatically called when the RelationContext is instantiated.
+
+ The units are sorted lexicographically first by the service ID, then by
+ the unit ID. Thus, if an interface has two other services, 'db:1'
+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+ set of data, the relation data for the units will be stored in the
+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+ If you only care about a single unit on the relation, you can just
+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
+ support multiple units on a relation, you should iterate over the list,
+ like::
+
+ {% for unit in interface -%}
+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
+ {%- endfor %}
+
+ Note that since all sets of relation data from all related services and
+ units are in a single list, if you need to know which service or unit a
+ set of data came from, you'll need to extend this class to preserve
+ that information.
+ """
+ if not hookenv.relation_ids(self.name):
+ return
+
+ ns = self.setdefault(self.name, [])
+ for rid in sorted(hookenv.relation_ids(self.name)):
+ for unit in sorted(hookenv.related_units(rid)):
+ reldata = hookenv.relation_get(rid=rid, unit=unit)
+ if self._is_ready(reldata):
+ ns.append(reldata)
+
+ def provide_data(self):
+ """
+ Return data to be relation_set for this interface.
+ """
+ return {}
+
+
+class MysqlRelation(RelationContext):
+ """
+ Relation context for the `mysql` interface.
+
+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
+ """
+ name = 'db'
+ interface = 'mysql'
+
+ def __init__(self, *args, **kwargs):
+ self.required_keys = ['host', 'user', 'password', 'database']
+ RelationContext.__init__(self, *args, **kwargs)
+
+
+class HttpRelation(RelationContext):
+ """
+ Relation context for the `http` interface.
+
+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
+ """
+ name = 'website'
+ interface = 'http'
+
+ def __init__(self, *args, **kwargs):
+ self.required_keys = ['host', 'port']
+ RelationContext.__init__(self, *args, **kwargs)
+
+ def provide_data(self):
+ return {
+ 'host': hookenv.unit_get('private-address'),
+ 'port': 80,
+ }
+
+
+class RequiredConfig(dict):
+ """
+ Data context that loads config options with one or more mandatory options.
+
+ Once the required options have been changed from their default values, all
+ config options will be available, namespaced under `config` to prevent
+ potential naming conflicts (for example, between a config option and a
+ relation property).
+
+ :param list *args: List of options that must be changed from their default values.
+ """
+
+ def __init__(self, *args):
+ self.required_options = args
+ self['config'] = hookenv.config()
+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
+ self.config = yaml.load(fp).get('options', {})
+
+ def __bool__(self):
+ for option in self.required_options:
+ if option not in self['config']:
+ return False
+ current_value = self['config'][option]
+ default_value = self.config[option].get('default')
+ if current_value == default_value:
+ return False
+ if current_value in (None, '') and default_value in (None, ''):
+ return False
+ return True
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+
+class StoredContext(dict):
+ """
+ A data context that always returns the data that it was first created with.
+
+ This is useful to do a one-time generation of things like passwords, that
+ will thereafter use the same value that was originally generated, instead
+ of generating a new value each time it is run.
+ """
+ def __init__(self, file_name, config_data):
+ """
+ If the file exists, populate `self` with the data from the file.
+ Otherwise, populate with the given data and persist it to the file.
+ """
+ if os.path.exists(file_name):
+ self.update(self.read_context(file_name))
+ else:
+ self.store_context(file_name, config_data)
+ self.update(config_data)
+
+ def store_context(self, file_name, config_data):
+ if not os.path.isabs(file_name):
+ file_name = os.path.join(hookenv.charm_dir(), file_name)
+ with open(file_name, 'w') as file_stream:
+ os.fchmod(file_stream.fileno(), 0o600)
+ yaml.dump(config_data, file_stream)
+
+ def read_context(self, file_name):
+ if not os.path.isabs(file_name):
+ file_name = os.path.join(hookenv.charm_dir(), file_name)
+ with open(file_name, 'r') as file_stream:
+ data = yaml.load(file_stream)
+ if not data:
+ raise OSError("%s is empty" % file_name)
+ return data
+
+
+class TemplateCallback(ManagerCallback):
+ """
+ Callback class that will render a Jinja2 template, for use as a ready
+ action.
+
+ :param str source: The template source file, relative to
+ `$CHARM_DIR/templates`
+
+ :param str target: The target to write the rendered template to
+ :param str owner: The owner of the rendered file
+ :param str group: The group of the rendered file
+ :param int perms: The permissions of the rendered file
+ :param partial on_change_action: functools partial to be executed when
+ rendered file changes
+ """
+ def __init__(self, source, target,
+ owner='root', group='root', perms=0o444,
+ on_change_action=None):
+ self.source = source
+ self.target = target
+ self.owner = owner
+ self.group = group
+ self.perms = perms
+ self.on_change_action = on_change_action
+
+ def __call__(self, manager, service_name, event_name):
+ pre_checksum = ''
+ if self.on_change_action and os.path.isfile(self.target):
+ pre_checksum = host.file_hash(self.target)
+ service = manager.get_service(service_name)
+ context = {}
+ for ctx in service.get('required_data', []):
+ context.update(ctx)
+ templating.render(self.source, self.target, context,
+ self.owner, self.group, self.perms)
+ if self.on_change_action:
+ if pre_checksum == host.file_hash(self.target):
+ hookenv.log(
+ 'No change detected: {}'.format(self.target),
+ hookenv.DEBUG)
+ else:
+ self.on_change_action()
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py
new file mode 100644
index 0000000..7e3f969
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+import re
+
+
+def bool_from_string(value):
+ """Interpret string value as boolean.
+
+ Returns True if value translates to True otherwise False.
+ """
+ if isinstance(value, six.string_types):
+ value = six.text_type(value)
+ else:
+ msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ raise ValueError(msg)
+
+ value = value.strip().lower()
+
+ if value in ['y', 'yes', 'true', 't', 'on']:
+ return True
+ elif value in ['n', 'no', 'false', 'f', 'off']:
+ return False
+
+ msg = "Unable to interpret string value '%s' as boolean" % (value)
+ raise ValueError(msg)
+
+
+def bytes_from_string(value):
+ """Interpret human readable string value as bytes.
+
+ Returns int
+ """
+ BYTE_POWER = {
+ 'K': 1,
+ 'KB': 1,
+ 'M': 2,
+ 'MB': 2,
+ 'G': 3,
+ 'GB': 3,
+ 'T': 4,
+ 'TB': 4,
+ 'P': 5,
+ 'PB': 5,
+ }
+ if isinstance(value, six.string_types):
+ value = six.text_type(value)
+ else:
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+ if not matches:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py
new file mode 100644
index 0000000..21cc8ab
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+
+from subprocess import check_call
+
+from charmhelpers.core.hookenv import (
+ log,
+ DEBUG,
+ ERROR,
+)
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+def create(sysctl_dict, sysctl_file):
+ """Creates a sysctl.conf file from a YAML associative array
+
+ :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+ :type sysctl_dict: str
+ :param sysctl_file: path to the sysctl file to be saved
+ :type sysctl_file: str or unicode
+ :returns: None
+ """
+ try:
+ sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+ except yaml.YAMLError:
+ log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+ level=ERROR)
+ return
+
+ with open(sysctl_file, "w") as fd:
+ for key, value in sysctl_dict_parsed.items():
+ fd.write("{}={}\n".format(key, value))
+
+ log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+ level=DEBUG)
+
+ check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py
new file mode 100644
index 0000000..4531999
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py
@@ -0,0 +1,68 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install jinja2 on the fly, then retry the import.
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        # NOTE: the redundant `source = source` no-op assignment that used
        # to sit here has been removed; `source` is used directly.
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the target directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py
new file mode 100644
index 0000000..338104e
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py
@@ -0,0 +1,521 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Authors:
+# Kapil Thangavelu <kapil.foss@gmail.com>
+#
+"""
+Intro
+-----
+
+A simple way to store state in units. This provides a key value
+storage with support for versioned, transactional operation,
+and can calculate deltas from previous values to simplify unit logic
+when processing changes.
+
+
+Hook Integration
+----------------
+
+There are several extant frameworks for hook execution, including
+
+ - charmhelpers.core.hookenv.Hooks
+ - charmhelpers.core.services.ServiceManager
+
+The storage classes are framework agnostic, one simple integration is
+via the HookData contextmanager. It will record the current hook
+execution environment (including relation data, config data, etc.),
+setup a transaction and allow easy access to the changes from
+previously seen values. One consequence of the integration is the
+reservation of particular keys ('rels', 'unit', 'env', 'config',
+'charm_revisions') for their respective values.
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+    from charmhelpers.core import hookenv, unitdata
+
+ hook_data = unitdata.HookData()
+ db = unitdata.kv()
+ hooks = hookenv.Hooks()
+
+ @hooks.hook
+ def config_changed():
+ # Print all changes to configuration from previously seen
+ # values.
+ for changed, (prev, cur) in hook_data.conf.items():
+ print('config changed', changed,
+ 'previous value', prev,
+ 'current value', cur)
+
+        # Get some unit specific bookkeeping
+ if not db.get('pkg_key'):
+ key = urllib.urlopen('https://example.com/pkg_key').read()
+ db.set('pkg_key', key)
+
+ # Directly access all charm config as a mapping.
+ conf = db.getrange('config', True)
+
+ # Directly access all relation data as a mapping
+ rels = db.getrange('rels', True)
+
+ if __name__ == '__main__':
+ with hook_data():
+            hooks.execute(sys.argv)
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+ >>> from unitdata import kv
+ >>> db = kv()
+ >>> with db.hook_scope('install'):
+ ... # do work, in transactional scope.
+ ... db.set('x', 1)
+ >>> db.get('x')
+ 1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data struct capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+ >>> kv.set('y', True)
+ >>> kv.get('y')
+ True
+
+ # We can set complex values (dicts, lists) as a single key.
+   >>> kv.set('config', {'a': 1, 'b': True})
+
+ # Also supports returning dictionaries as a record which
+ # provides attribute access.
+ >>> config = kv.get('config', record=True)
+ >>> config.b
+ True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+ >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+ >>> kv.getrange('gui.', strip=True)
+ {'z': 1, 'y': 2}
+
+When updating values, its very helpful to understand which values
+have actually changed and how have they changed. The storage
+provides a delta method to provide for this::
+
+ >>> data = {'debug': True, 'option': 2}
+ >>> delta = kv.delta(data, 'config.')
+ >>> delta.debug.previous
+ None
+ >>> delta.debug.current
+ True
+ >>> delta
+ {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change, it needs to
+be explicitly saved via 'update' method::
+
+ >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated to the hookname.
+
+ >>> with db.hook_scope('config-changed'):
+ ... db.set('x', 42)
+ >>> db.gethistory('x')
+ [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+ (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
+
+
class Storage(object):
    """Simple key value database for local unit state within charms.

    Modifications are not persisted unless :meth:`flush` is called.

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
    """
    def __init__(self, path=None):
        # Resolve the backing sqlite file: an explicit ``path`` wins, then
        # the UNIT_STATE_DB environment variable, then .unit-state.db in
        # the charm directory.
        self.db_path = path
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        # Id of the current row in the ``hooks`` table; set while a
        # hook_scope() is active, None otherwise.
        self.revision = None
        self._closed = False
        self._init()

    def close(self):
        """Discard uncommitted changes and close the database connection.

        Calling close() on an already-closed store is a no-op.
        """
        if self._closed:
            return
        # flush(False) rolls back anything not yet committed.
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True

    def get(self, key, default=None, record=False):
        """Return the value stored under ``key``.

        :param key: key to look up
        :param default: value returned when the key is absent
        :param bool record: when True, wrap a dict value in a
            :class:`Record` for attribute-style access
        """
        self.cursor.execute('select data from kv where key=?', [key])
        result = self.cursor.fetchone()
        if not result:
            return default
        if record:
            return Record(json.loads(result[0]))
        return json.loads(result[0])

    def getrange(self, key_prefix, strip=False):
        """
        Get a range of keys starting with a common prefix as a mapping of
        keys to values.

        :param str key_prefix: Common prefix among all keys
        :param bool strip: Optionally strip the common prefix from the key
            names in the returned dict
        :return dict: A (possibly empty) dict of key-value mappings
        """
        self.cursor.execute("select key, data from kv where key like ?",
                            ['%s%%' % key_prefix])
        result = self.cursor.fetchall()

        if not result:
            return {}
        if not strip:
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])

    def update(self, mapping, prefix=""):
        """
        Set the values of multiple keys at once.

        :param dict mapping: Mapping of keys to values
        :param str prefix: Optional prefix to apply to all keys in `mapping`
            before setting
        """
        for k, v in mapping.items():
            self.set("%s%s" % (prefix, k), v)

    def unset(self, key):
        """
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        # Inside a hook scope, record the deletion in the revision history.
        if self.revision and self.cursor.rowcount:
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

    def unsetrange(self, keys=None, prefix=""):
        """
        Remove a range of keys starting with a common prefix, from the database
        entirely.

        :param list keys: List of keys to remove.
        :param str prefix: Optional prefix to apply to all keys in ``keys``
            before removing.
        """
        if keys is not None:
            # Explicit key list: delete exactly those (prefixed) keys.
            keys = ['%s%s' % (prefix, key) for key in keys]
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            # No key list: delete every key sharing the prefix.
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        :returns: the value that was set
        """
        serialized = json.dumps(value)

        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Save
        if not self.revision:
            return value

        # Inside a hook scope, also record this value against the current
        # revision so gethistory() can reconstruct per-hook changes.
        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """
        return a delta containing values that have changed.
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        # Nested scopes are not supported.
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except:
            # Any error inside the scope rolls back uncommitted changes
            # before re-raising (bare except is deliberate: the exception
            # is always re-raised).
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        """Commit (save=True) or roll back (save=False) pending changes.

        Rollback is a no-op once the store has been closed.
        """
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        """Create the kv, kv_revisions and hooks tables if missing."""
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        """Return the per-hook revision history for ``key``.

        Each row is (revision, key, data, hook_name, iso_timestamp); with
        deserialize=True rows are decoded via _parse_history.
        """
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        return map(_parse_history, self.cursor.fetchall())

    def debug(self, fh=sys.stderr):
        """Dump the kv and kv_revisions tables to ``fh`` for debugging."""
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
+
+
+def _parse_history(d):
+ return (d[0], d[1], json.loads(d[2]), d[3],
+ datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
+
+
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelpers.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hooks.execute(sys.argv)

    """
    def __init__(self):
        self.kv = kv()
        # Config/relation deltas; populated by _record_hook() when the
        # context manager is entered, None before that.
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        """Open a hook-scoped transaction and record the hook environment.

        Yields (kv, config_delta, relation_delta) for use by the hook body.
        """
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        charm_rev = open(
            os.path.join(charm_dir, 'revision')).read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        """Store the hook execution environment under the reserved keys and
        return the (config, relation) deltas."""
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
+
+
class Record(dict):
    """A dict whose entries can also be read as attributes."""

    __slots__ = ()

    def __getattr__(self, k):
        # Fall back to item lookup; missing keys surface as the
        # conventional AttributeError.
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
+
+
class DeltaSet(Record):
    """Mapping of key -> Delta as produced by Storage.delta(); inherits
    attribute-style access from Record."""

    __slots__ = ()


# (previous, current) value pair describing a single key's change.
Delta = collections.namedtuple('Delta', ['previous', 'current'])


# Module-level singleton returned by kv().
_KV = None


def kv():
    """Return the shared :class:`Storage` instance, creating it on first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py
new file mode 100644
index 0000000..1cfb99f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py
@@ -0,0 +1,468 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import importlib
+from tempfile import NamedTemporaryFile
+import time
+from yaml import safe_load
+from charmhelpers.core.host import (
+ lsb_release
+)
+import subprocess
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+)
+import os
+
+import six
+if six.PY3:
+ from urllib.parse import urlparse, urlunparse
+else:
+ from urlparse import urlparse, urlunparse
+
+
+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+"""
+PROPOSED_POCKET = """# Proposed
+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
+"""
+CLOUD_ARCHIVE_POCKETS = {
+ # Folsom
+ 'folsom': 'precise-updates/folsom',
+ 'precise-folsom': 'precise-updates/folsom',
+ 'precise-folsom/updates': 'precise-updates/folsom',
+ 'precise-updates/folsom': 'precise-updates/folsom',
+ 'folsom/proposed': 'precise-proposed/folsom',
+ 'precise-folsom/proposed': 'precise-proposed/folsom',
+ 'precise-proposed/folsom': 'precise-proposed/folsom',
+ # Grizzly
+ 'grizzly': 'precise-updates/grizzly',
+ 'precise-grizzly': 'precise-updates/grizzly',
+ 'precise-grizzly/updates': 'precise-updates/grizzly',
+ 'precise-updates/grizzly': 'precise-updates/grizzly',
+ 'grizzly/proposed': 'precise-proposed/grizzly',
+ 'precise-grizzly/proposed': 'precise-proposed/grizzly',
+ 'precise-proposed/grizzly': 'precise-proposed/grizzly',
+ # Havana
+ 'havana': 'precise-updates/havana',
+ 'precise-havana': 'precise-updates/havana',
+ 'precise-havana/updates': 'precise-updates/havana',
+ 'precise-updates/havana': 'precise-updates/havana',
+ 'havana/proposed': 'precise-proposed/havana',
+ 'precise-havana/proposed': 'precise-proposed/havana',
+ 'precise-proposed/havana': 'precise-proposed/havana',
+ # Icehouse
+ 'icehouse': 'precise-updates/icehouse',
+ 'precise-icehouse': 'precise-updates/icehouse',
+ 'precise-icehouse/updates': 'precise-updates/icehouse',
+ 'precise-updates/icehouse': 'precise-updates/icehouse',
+ 'icehouse/proposed': 'precise-proposed/icehouse',
+ 'precise-icehouse/proposed': 'precise-proposed/icehouse',
+ 'precise-proposed/icehouse': 'precise-proposed/icehouse',
+ # Juno
+ 'juno': 'trusty-updates/juno',
+ 'trusty-juno': 'trusty-updates/juno',
+ 'trusty-juno/updates': 'trusty-updates/juno',
+ 'trusty-updates/juno': 'trusty-updates/juno',
+ 'juno/proposed': 'trusty-proposed/juno',
+ 'trusty-juno/proposed': 'trusty-proposed/juno',
+ 'trusty-proposed/juno': 'trusty-proposed/juno',
+ # Kilo
+ 'kilo': 'trusty-updates/kilo',
+ 'trusty-kilo': 'trusty-updates/kilo',
+ 'trusty-kilo/updates': 'trusty-updates/kilo',
+ 'trusty-updates/kilo': 'trusty-updates/kilo',
+ 'kilo/proposed': 'trusty-proposed/kilo',
+ 'trusty-kilo/proposed': 'trusty-proposed/kilo',
+ 'trusty-proposed/kilo': 'trusty-proposed/kilo',
+ # Liberty
+ 'liberty': 'trusty-updates/liberty',
+ 'trusty-liberty': 'trusty-updates/liberty',
+ 'trusty-liberty/updates': 'trusty-updates/liberty',
+ 'trusty-updates/liberty': 'trusty-updates/liberty',
+ 'liberty/proposed': 'trusty-proposed/liberty',
+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
+ # Mitaka
+ 'mitaka': 'trusty-updates/mitaka',
+ 'trusty-mitaka': 'trusty-updates/mitaka',
+ 'trusty-mitaka/updates': 'trusty-updates/mitaka',
+ 'trusty-updates/mitaka': 'trusty-updates/mitaka',
+ 'mitaka/proposed': 'trusty-proposed/mitaka',
+ 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+}
+
+# The order of this list is very important. Handlers should be listed in from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+ 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+)
+
+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
+APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
+APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
+
+
class SourceConfigError(Exception):
    """Raised when charm configuration specifies an invalid package source."""
    pass


class UnhandledSource(Exception):
    """Raised when no fetch handler is able to process a given source."""
    pass


class AptLockError(Exception):
    """Raised when the APT lock cannot be acquired."""
    pass
+
+
class BaseFetchHandler(object):

    """Base class for FetchHandler implementations in fetch plugins."""

    def can_handle(self, source):
        """Return True when the source can be handled; otherwise return a
        string explaining why it cannot."""
        return "Wrong source type"

    def install(self, source):
        """Download and unpack the source, returning the path to the
        unpacked files, or raise UnhandledSource on failure."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split ``url`` into its urlparse components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment."""
        scheme, netloc, path, params = self.parse_url(url)[:4]
        return urlunparse((scheme, netloc, path, params, '', ''))
+
+
def filter_installed_packages(packages):
    """Return the subset of ``packages`` that still require installation.

    Packages with no installation candidate are logged at WARNING and kept
    in the result, matching the historical behaviour of this helper.
    """
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            # Only queue the package when no version is currently installed
            # (replaces the previous side-effecting `or` expression).
            if not p.current_ver:
                _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs
+
+
def apt_cache(in_memory=True):
    """Build and return an apt cache.

    :param in_memory: when True, disable the on-disk package cache files so
        the cache is built purely in memory.
    """
    # Imported lazily: python-apt is only available on Debian/Ubuntu hosts.
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()
+
+
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages.

    :param packages: package name or list of package names
    :param options: extra apt-get options; defaults to keeping existing
        conffiles on upgrade
    :param fatal: when True, retry on apt lock contention and raise on
        failure (see _run_apt_command)
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    # Accept either a single package name or an iterable of names.
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    _run_apt_command(cmd, fatal)
+
+
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages.

    :param options: extra apt-get options; defaults to keeping existing
        conffiles on upgrade
    :param fatal: when True, retry on apt lock contention and raise on failure
    :param dist: when True, run 'dist-upgrade' instead of 'upgrade'
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)
+
+
def apt_update(fatal=False):
    """Update the local apt package cache (apt-get update).

    :param fatal: when True, retry on apt lock contention and raise on failure
    """
    cmd = ['apt-get', 'update']
    _run_apt_command(cmd, fatal)
+
+
def apt_purge(packages, fatal=False):
    """Purge one or more packages, removing their configuration files too.

    :param packages: package name or list of package names
    :param fatal: when True, retry on apt lock contention and raise on failure
    """
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)
+
+
def apt_mark(packages, mark, fatal=False):
    """Flag one or more packages using apt-mark.

    :param packages: package name or list of package names
    :param mark: the apt-mark state to apply, e.g. 'hold' or 'unhold'
    :param fatal: when True, raise CalledProcessError if apt-mark fails
    """
    cmd = ['apt-mark', mark]
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    # Log the actual mark being applied; the previous message always said
    # "Holding", which was wrong for 'unhold'.
    log("Marking {} as {}".format(packages, mark))

    if fatal:
        subprocess.check_call(cmd, universal_newlines=True)
    else:
        subprocess.call(cmd, universal_newlines=True)
+
+
def apt_hold(packages, fatal=False):
    """Pin one or more packages at their current version via apt-mark hold."""
    return apt_mark(packages, 'hold', fatal=fatal)


def apt_unhold(packages, fatal=False):
    """Release an apt-mark hold on one or more packages."""
    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
        source.startswith('http') or
        source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        # Delegated straight to add-apt-repository, which understands PPAs,
        # plain URLs, full deb lines and cloud-archive: shortcuts.
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        # Ubuntu Cloud Archive pocket, e.g. 'cloud:trusty-updates/mitaka'.
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        # Enable the -proposed pocket for the running release.
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        # Explicit no-op: use the stock distribution archives.
        pass
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armoured key material: feed it to apt-key on stdin.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        elif 'http://' in key:
            # Key served over HTTP: download it, then add it from the file.
            with NamedTemporaryFile('w+') as key_file:
                subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
                subprocess.check_call(['apt-key', 'add', key_file.name])
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
+
+
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
    The fragment needs to be included as a string. Sources and their
    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.

    :param update: when True, run apt_update() after adding the sources
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    # A single source given as a plain string is treated as a one-item list.
    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        # No keys supplied at all: add every source unkeyed.
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, six.string_types):
            keys = [keys]

        # Sources and keys are matched positionally, so they must pair up.
        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')
        for source, key in zip(sources, keys):
            add_source(source, key)
    if update:
        apt_update(fatal=True)
+
+
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted.  If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    # Handlers are tried in FETCH_HANDLERS order; the last handler to
    # succeed determines the returned path.
    for handler in handlers:
        try:
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource as e:
            log('Install source attempt unsuccessful: {}'.format(e),
                level='WARNING')
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
+
+
def install_from_config(config_var_name):
    """Install a remote source whose URL is stored in charm configuration.

    :param config_var_name: name of the config option holding the source URL
    :return: the path the source was installed to (see install_remote)
    """
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)
+
+
def plugins(fetch_handlers=None):
    """Instantiate the configured fetch handler classes.

    :param fetch_handlers: iterable of dotted-path class names; defaults to
        FETCH_HANDLERS when empty or not supplied.
    :return: list of handler instances; entries whose module or class cannot
        be imported are logged and skipped rather than raising.
    """
    handler_names = fetch_handlers or FETCH_HANDLERS
    plugin_list = []
    for handler_name in handler_names:
        package, classname = handler_name.rsplit('.', 1)
        try:
            module = importlib.import_module(package)
            plugin_list.append(getattr(module, classname)())
        except (ImportError, AttributeError):
            # Skip missing plugins so that they can be omitted from
            # installation if desired.
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list
+
+
def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking exit status and retrying if the fatal flag
    is set to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's exit status should be checked
        and the command retried while the dpkg lock is held elsewhere.
    """
    env = os.environ.copy()

    # Never prompt interactively from inside a hook.
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                # Give up after APT_NO_LOCK_RETRY_COUNT attempts and let the
                # CalledProcessError propagate to the caller.
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)

    else:
        # Best-effort: failures are ignored when fatal is False.
        subprocess.call(cmd, env=env)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py
new file mode 100644
index 0000000..efd7f9f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py
@@ -0,0 +1,167 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import hashlib
+import re
+
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource
+)
+from charmhelpers.payload.archive import (
+ get_archive_handler,
+ extract,
+)
+from charmhelpers.core.host import mkdir, check_hash
+
+import six
+if six.PY3:
+ from urllib.request import (
+ build_opener, install_opener, urlopen, urlretrieve,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ )
+ from urllib.parse import urlparse, urlunparse, parse_qs
+ from urllib.error import URLError
+else:
+ from urllib import urlretrieve
+ from urllib2 import (
+ build_opener, install_opener, urlopen,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ URLError
+ )
+ from urlparse import urlparse, urlunparse, parse_qs
+
+
def splituser(host):
    """Split ``user@host`` into ``(user, host)``.

    Replacement for urllib.splituser(), whose six support is broken.
    Splits at the LAST ``@`` (matching the original greedy regex);
    returns ``(None, host)`` when no ``@`` is present.
    """
    if '@' not in host:
        return None, host
    userinfo, _, bare_host = host.rpartition('@')
    return userinfo, bare_host
+
+
def splitpasswd(user):
    """Split ``user:password`` into ``(user, password)``.

    Replacement for urllib.splitpasswd(), which six does not expose.
    Splits at the FIRST ``:`` (matching the original ``[^:]*`` regex);
    returns ``(user, None)`` when no colon is present.
    """
    name, sep, passwd = user.partition(':')
    if sep:
        return name, passwd
    return user, None
+
+
class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        """Return True when `source` is an archive URL this handler accepts."""
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
            # XXX: Why is this returning a boolean and a string? It's
            # doomed to fail since "bool(can_handle('foo://'))" will be True.
            return "Wrong source type"
        if get_archive_handler(self.base_url(source)):
            return True
        return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse(source)
        if proto in ('http', 'https'):
            # Strip any user:password@ credentials from the URL and install
            # a basic-auth handler carrying them instead.
            auth, barehost = splituser(netloc)
            if auth is not None:
                source = urlunparse((proto, barehost, path, params, query, fragment))
                username, password = splitpasswd(auth)
                passman = HTTPPasswordMgrWithDefaultRealm()
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                install_opener(opener)
        response = urlopen(source)
        try:
            # BUG FIX: write in binary mode -- response.read() returns bytes;
            # text mode fails on Python 3 and can corrupt archives on
            # platforms that translate line endings.
            with open(dest, 'wb') as dest_file:
                dest_file.write(response.read())
        except Exception as e:
            # Don't leave a partial download behind.
            if os.path.isfile(dest):
                os.unlink(dest)
            raise e

    # Mandatory file validation via Sha1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        """Download `url` to a temp file, verify `hashsum`, return the path.

        :raises: a hash-mismatch error from check_hash on validation failure.
        """
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/fetched/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.

        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
        try:
            self.download(source, dld_file)
        except URLError as e:
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        # Validate against any hashes carried on the URL fragment,
        # e.g. "#sha1=deadbeef" (possibly several algorithms).
        options = parse_qs(url_parts.fragment)
        for key, value in options.items():
            if not six.PY3:
                algorithms = hashlib.algorithms
            else:
                algorithms = hashlib.algorithms_available
            if key in algorithms:
                if len(value) != 1:
                    raise TypeError(
                        "Expected 1 hash value, not %d" % len(value))
                expected = value[0]
                check_hash(dld_file, expected, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 0000000..3531315
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,78 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource
+)
+from charmhelpers.core.host import mkdir
+
+import six
+if six.PY3:
+ raise ImportError('bzrlib does not support Python3')
+
+try:
+ from bzrlib.branch import Branch
+ from bzrlib import bzrdir, workingtree, errors
+except ImportError:
+ from charmhelpers.fetch import apt_install
+ apt_install("python-bzrlib")
+ from bzrlib.branch import Branch
+ from bzrlib import bzrdir, workingtree, errors
+
+
class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""

    def can_handle(self, source):
        # Only bzr+ssh:// and lp: URLs are claimed by this handler.
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
            return False
        else:
            return True

    def branch(self, source, dest):
        """Push the remote branch at `source` into a working tree at `dest`."""
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        try:
            # Create the destination branch, or reuse it if it already exists.
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            local_branch = Branch.open(dest)
        try:
            remote_branch = Branch.open(source)
            remote_branch.push(local_branch)
            tree = workingtree.WorkingTree.open(dest)
            tree.update()
        except Exception as e:
            # NOTE(review): this re-raises unchanged, so the try/except adds
            # nothing -- confirm whether cleanup was intended here.
            raise e

    def install(self, source):
        """Branch `source` into $CHARM_DIR/fetched/<name> and return the path.

        <name> is the last component of the URL path.
        :raises UnhandledSource: on OSError during branching.
        """
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py
new file mode 100644
index 0000000..f023b26
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py
@@ -0,0 +1,73 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource
+)
+from charmhelpers.core.host import mkdir
+
+import six
+if six.PY3:
+ raise ImportError('GitPython does not support Python 3')
+
+try:
+ from git import Repo
+except ImportError:
+ from charmhelpers.fetch import apt_install
+ apt_install("python-git")
+ from git import Repo
+
+from git.exc import GitCommandError # noqa E402
+
+
class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs"""

    def can_handle(self, source):
        # Claim http(s):// and git:// URLs only.
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git'):
            return False
        else:
            return True

    def clone(self, source, dest, branch, depth=None):
        """Clone `branch` of `source` into `dest`; shallow clone when `depth` given."""
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        if depth:
            Repo.clone_from(source, dest, branch=branch, depth=depth)
        else:
            Repo.clone_from(source, dest, branch=branch)

    def install(self, source, branch="master", dest=None, depth=None):
        """Clone `source` and return the checkout directory.

        The target directory is named after the last component of the URL
        path, created under `dest` if given, else $CHARM_DIR/fetched.
        :raises UnhandledSource: on git command failure or OSError.
        """
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch, depth)
        except GitCommandError as e:
            raise UnhandledSource(e)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
diff --git a/charms/trusty/contrail-analytics/hooks/config-changed b/charms/trusty/contrail-analytics/hooks/config-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/config-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined b/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py b/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py
new file mode 100755
index 0000000..c05a53f
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+
+from socket import gethostbyname
+import sys
+import shutil
+
+from apt_pkg import version_compare
+import yaml
+
+from charmhelpers.contrib.openstack.utils import configure_installation_source
+
+from charmhelpers.core.hookenv import (
+ Hooks,
+ UnregisteredHookError,
+ config,
+ local_unit,
+ log,
+ relation_get,
+ relation_ids,
+ relation_set,
+ unit_get
+)
+
+from charmhelpers.core.host import (
+ restart_on_change,
+ service_restart
+)
+
+from charmhelpers.fetch import (
+ apt_install,
+ apt_upgrade,
+ configure_sources
+)
+
+from contrail_analytics_utils import (
+ CONTRAIL_VERSION,
+ cassandra_units,
+ fix_hostname,
+ fix_nodemgr,
+ fix_permissions,
+ fix_services,
+ kafka_units,
+ provision_analytics,
+ units,
+ unprovision_analytics,
+ write_alarm_config,
+ write_analytics_api_config,
+ write_collector_config,
+ write_nodemgr_config,
+ write_query_engine_config,
+ write_snmp_collector_config,
+ write_topology_config,
+ write_vnc_api_config,
+ write_keystone_auth_config
+)
+
+PACKAGES = [ "contrail-analytics", "contrail-utils", "contrail-nodemgr",
+ "python-jinja2" ]
+
+hooks = Hooks()
+config = config()
+
def add_analytics():
    """Provision this analytics node once every dependency is ready.

    No-op until all six relations have reported ready; provisions at most
    once, latched by the "analytics-configured" config flag.
    """
    # check relation dependencies
    if not config.get("analytics-configured") \
            and config.get("cassandra-ready") \
            and config.get("kafka-ready") \
            and config.get("zookeeper-ready") \
            and config.get("contrail-api-ready") \
            and config.get("contrail-discovery-ready") \
            and config.get("identity-admin-ready"):
        # provision analytics on 3.0.2.0+
        if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
            provision_analytics()
        config["analytics-configured"] = True
+
@hooks.hook("cassandra-relation-changed")
def cassandra_changed():
    """Mark cassandra ready once enough units have joined, then reconfigure."""
    if not relation_get("native_transport_port"):
        log("Relation not ready")
        return
    if not config.get("cassandra-ready"):
        units = len(cassandra_units())
        # wait for the configured number of cassandra units before proceeding
        required = config["cassandra-units"]
        if units < required:
            log("{} cassandra unit(s) ready, require {} more".format(units, required - units))
            return
        config["cassandra-ready"] = True
    cassandra_relation()
    add_analytics()

@hooks.hook("cassandra-relation-departed")
@hooks.hook("cassandra-relation-broken")
def cassandra_departed():
    """Unprovision and clear readiness when the last cassandra unit leaves."""
    if not units("cassandra"):
        remove_analytics()
        config["cassandra-ready"] = False
    cassandra_relation()

# Re-render the configs that embed the cassandra server list; the decorator
# restarts each service only when its file content actually changed.
@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
                    "/etc/contrail/contrail-query-engine.conf": ["contrail-query-engine"],
                    "/etc/contrail/contrail-analytics-api.conf": ["contrail-analytics-api"]})
def cassandra_relation():
    write_collector_config()
    write_query_engine_config()
    write_analytics_api_config()
+
+@hooks.hook("config-changed")
+def config_changed():
+ vip = config.get("vip")
+ for rid in relation_ids("contrail-analytics-api"):
+ relation_set(relation_id=rid, vip=vip)
+
+@hooks.hook("contrail-analytics-api-relation-joined")
+def contrail_analytics_api_joined():
+ relation_set(port=8081, vip=config.get("vip"))
+
+@hooks.hook("contrail-api-relation-changed")
+def contrail_api_changed():
+ if not relation_get("port"):
+ log("Relation not ready")
+ return
+ contrail_api_relation()
+ config["contrail-api-ready"] = True
+ add_analytics()
+
+@hooks.hook("contrail-api-relation-departed")
+@hooks.hook("contrail-api-relation-broken")
+def contrail_api_departed():
+ if not units("contrail-api"):
+ remove_analytics()
+ config["contrail-api-ready"] = False
+ contrail_api_relation()
+
+@restart_on_change({"/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
+ "/etc/contrail/vnc_api_lib.ini": ["contrail-topology"]})
+def contrail_api_relation():
+ write_snmp_collector_config()
+ write_vnc_api_config()
+ write_analytics_api_config()
+
+@hooks.hook("contrail-discovery-relation-changed")
+def contrail_discovery_changed():
+ if not relation_get("port"):
+ log("Relation not ready")
+ return
+ contrail_discovery_relation()
+ config["contrail-discovery-ready"] = True
+ add_analytics()
+
+@hooks.hook("contrail-discovery-relation-departed")
+@hooks.hook("contrail-discovery-relation-broken")
+def contrail_discovery_departed():
+ if not units("contrail-discovery"):
+ remove_analytics()
+ config["contrail-discovery-ready"] = False
+ contrail_discovery_relation()
+
+@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
+ "/etc/contrail/contrail-query-engine.conf": ["contrail-query-engine"],
+ "/etc/contrail/contrail-analytics-api.conf": ["contrail-analytics-api"],
+ "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"],
+ "/etc/contrail/contrail-topology.conf": ["contrail-topology"],
+ "/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
+ "/etc/contrail/contrail-analytics-nodemgr.conf": ["contrail-analytics-nodemgr"]})
+def contrail_discovery_relation():
+ write_collector_config()
+ write_query_engine_config()
+ write_analytics_api_config()
+ write_alarm_config()
+ write_topology_config()
+ write_snmp_collector_config()
+ write_nodemgr_config()
+
+@hooks.hook("http-services-relation-joined")
+def http_services_joined():
+ name = local_unit().replace("/", "-")
+ addr = gethostbyname(unit_get("private-address"))
+ services = [ { "service_name": "contrail-analytics-api",
+ "service_host": "0.0.0.0",
+ "service_port": 8081,
+ "service_options": [ "mode http", "balance leastconn", "option httpchk GET /analytics HTTP/1.0" ],
+ "servers": [ [ name, addr, 8081, "check" ] ] } ]
+ relation_set(services=yaml.dump(services))
+
+@hooks.hook("identity-admin-relation-changed")
+def identity_admin_changed():
+ if not relation_get("service_hostname"):
+ log("Relation not ready")
+ return
+ identity_admin_relation()
+ config["identity-admin-ready"] = True
+ add_analytics()
+
+@hooks.hook("identity-admin-relation-departed")
+@hooks.hook("identity-admin-relation-broken")
+def identity_admin_departed():
+ if not units("identity-admin"):
+ remove_analytics()
+ config["identity-admin-ready"] = False
+ identity_admin_relation()
+
+@restart_on_change({"/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
+ "/etc/contrail/vnc_api_lib.ini": ["contrail-topology"],
+ "/etc/contrail/contrail-keystone-auth.conf": ["contrail-keystone-auth"]})
+def identity_admin_relation():
+ write_snmp_collector_config()
+ write_vnc_api_config()
+ write_keystone_auth_config()
+
@hooks.hook()
def install():
    """Install hook: configure apt sources and install Contrail analytics."""
    # make sure the unit's own hostname resolves before anything starts
    fix_hostname()
    # apt preferences file -- presumably pins Contrail package versions;
    # content lives in files/contrail (not shown here)
    shutil.copy('files/contrail', '/etc/apt/preferences.d')
    configure_installation_source(config["openstack-origin"])
    configure_sources(True, "install-sources", "install-keys")
    apt_upgrade(fatal=True, dist=True)
    apt_install(PACKAGES, fatal=True)
    # post-install fixups for packaging gaps (see the fix_* helpers)
    fix_permissions()
    fix_services()
    fix_nodemgr()
+
+@hooks.hook("kafka-relation-changed")
+def kafka_changed():
+ if not relation_get("port"):
+ log("Relation not ready")
+ return
+ if not config.get("kafka-ready"):
+ units = len(kafka_units())
+ required = config["kafka-units"]
+ if units < required:
+ log("{} kafka unit(s) ready, require {} more".format(units, required - units))
+ return
+ config["kafka-ready"] = True
+ kafka_relation()
+ add_analytics()
+
+@hooks.hook("kafka-relation-departed")
+@hooks.hook("kafka-relation-broken")
+def kafka_departed():
+ if not units("kafka"):
+ remove_analytics()
+ config["kafka-ready"] = False
+ kafka_relation()
+
+@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
+ "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"]})
+def kafka_relation():
+ write_collector_config()
+ write_alarm_config()
+
+def main():
+ try:
+ hooks.execute(sys.argv)
+ except UnregisteredHookError as e:
+ log("Unknown hook {} - skipping.".format(e))
+
+def remove_analytics():
+ if config.get("analytics-configured"):
+ # unprovision analytics on 3.0.2.0+
+ if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
+ unprovision_analytics()
+ config["analytics-configured"] = False
+
+@hooks.hook("upgrade-charm")
+def upgrade_charm():
+ write_collector_config()
+ write_query_engine_config()
+ write_analytics_api_config()
+ write_alarm_config()
+ write_topology_config()
+ write_snmp_collector_config()
+ write_vnc_api_config()
+ write_nodemgr_config()
+ service_restart("supervisor-analytics")
+
+@hooks.hook("zookeeper-relation-changed")
+def zookeeper_changed():
+ if not relation_get("port"):
+ log("Relation not ready")
+ return
+ zookeeper_relation()
+ config["zookeeper-ready"] = True
+ add_analytics()
+
+@hooks.hook("zookeeper-relation-departed")
+@hooks.hook("zookeeper-relation-broken")
+def zookeeper_departed():
+ if not units("zookeeper"):
+ remove_analytics()
+ config["zookeeper-ready"] = False
+ zookeeper_relation()
+
+@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
+ "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"],
+ "/etc/contrail/contrail-topology.conf": ["contrail-topology"],
+ "/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"]})
+def zookeeper_relation():
+ write_collector_config()
+ write_alarm_config()
+ write_topology_config()
+ write_snmp_collector_config()
+
+if __name__ == "__main__":
+ main()
diff --git a/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py b/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py
new file mode 100644
index 0000000..86a6765
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py
@@ -0,0 +1,318 @@
+import os
+import pwd
+import shutil
+from socket import gaierror, gethostbyname, gethostname
+from subprocess import (
+ CalledProcessError,
+ check_call,
+ check_output
+)
+
+import apt_pkg
+from apt_pkg import version_compare
+
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ related_units,
+ relation_get,
+ relation_ids,
+ relation_type,
+ remote_unit,
+ unit_get
+)
+
+from charmhelpers.core.host import service_restart
+
+from charmhelpers.core.templating import render
+
+apt_pkg.init()
+
+def dpkg_version(pkg):
+ try:
+ return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
+ except CalledProcessError:
+ return None
+
+CONTRAIL_VERSION = dpkg_version("contrail-analytics")
+
+config = config()
+
+def contrail_ctx():
+ return { "host_ip": gethostbyname(unit_get("private-address")) }
+
+def cassandra_ctx():
+ key = "native_transport_port" \
+ if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0 \
+ else "rpc_port"
+ servers = [ gethostbyname(relation_get("private-address", unit, rid))
+ + ":" + port
+ for rid in relation_ids("cassandra")
+ for unit, port in
+ ((unit, relation_get(key, unit, rid)) for unit in related_units(rid))
+ if port ] \
+ if config.get("cassandra-ready") else []
+ return { "cassandra_servers": servers }
+
+def cassandra_units():
+ """Return a list of cassandra units"""
+ return [ unit for rid in relation_ids("cassandra")
+ for unit in related_units(rid)
+ if relation_get("native_transport_port", unit, rid) ]
+
+def contrail_api_ctx():
+ ctxs = [ { "api_server": vip if vip \
+ else gethostbyname(relation_get("private-address", unit, rid)),
+ "api_port": port }
+ for rid in relation_ids("contrail-api")
+ for unit, port, vip in
+ ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
+ for unit in related_units(rid))
+ if port ]
+ return ctxs[0] if ctxs else {}
+
+def discovery_ctx():
+ ctxs = [ { "disc_server_ip": vip if vip \
+ else gethostbyname(relation_get("private-address", unit, rid)),
+ "disc_server_port": port }
+ for rid in relation_ids("contrail-discovery")
+ for unit, port, vip in
+ ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
+ for unit in related_units(rid))
+ if port ]
+ return ctxs[0] if ctxs else {}
+
def fix_hostname():
    """Ensure the unit's own hostname resolves locally.

    If gethostbyname(hostname) fails, append a "127.0.1.1 <hostname>" line
    to /etc/hosts, inserted right after the 127.0.0.1 entry.
    """
    # ensure hostname is resolvable
    hostname = gethostname()
    try:
        gethostbyname(hostname)
    except gaierror:
        check_call(["sed", "-E", "-i", "-e",
                    "/127.0.0.1[[:blank:]]+/a \\\n127.0.1.1 " + hostname,
                    "/etc/hosts"])
+
def fix_nodemgr():
    """Install supervisor ini files and init script missing from the
    contrail-nodemgr package, then restart supervisor-analytics.

    Also fakes ntpq inside containers so nodemgr's NTP check passes.
    """
    # add files missing from contrail-nodemgr package
    # (identical destination for all six ini files -- loop instead of
    # six copy-pasted shutil.copy calls)
    for ini in ("contrail-nodemgr-analytics.ini",
                "contrail-analytics-api.ini",
                "contrail-collector.ini",
                "contrail-alarm-gen.ini",
                "contrail-topology.ini",
                "contrail-snmp-collector.ini"):
        shutil.copy("files/" + ini,
                    "/etc/contrail/supervisord_analytics_files")
    # nodemgr runs as the "contrail" user, which must own its ini file
    pw = pwd.getpwnam("contrail")
    os.chown("/etc/contrail/supervisord_analytics_files/contrail-nodemgr-analytics.ini",
             pw.pw_uid, pw.pw_gid)
    shutil.copy("files/contrail-analytics-nodemgr", "/etc/init.d")
    # 0o755: same permission bits as the old 0755 literal, but valid on
    # Python 3 and consistent with the vendored charmhelpers code
    os.chmod("/etc/init.d/contrail-analytics-nodemgr", 0o755)

    # fake ntp status when inside a container
    if is_container():
        shutil.copy("files/ntpq-nodemgr", "/usr/local/bin/ntpq")

    service_restart("supervisor-analytics")
+
def fix_permissions():
    """Make /etc/contrail root-owned and world-readable.

    0o755 keeps the same bits as the old py2-only 0755 literal while
    remaining valid under Python 3 (and matching vendored charmhelpers).
    """
    os.chmod("/etc/contrail", 0o755)
    # uid 0 / gid 0 == root:root
    os.chown("/etc/contrail", 0, 0)
+
def fix_services():
    """Make redis listen on all interfaces and restart it.

    Comments out any "bind ..." directive in redis.conf, since redis
    binds only localhost by default.
    """
    # redis listens on localhost by default
    check_output(["sed", "-i", "-e",
                  "s/^bind /# bind /",
                  "/etc/redis/redis.conf"])
    service_restart("redis-server")
+
+def identity_admin_ctx():
+ ctxs = [ { "auth_host": gethostbyname(hostname),
+ "auth_port": relation_get("service_port", unit, rid),
+ "admin_user": relation_get("service_username", unit, rid),
+ "admin_password": relation_get("service_password", unit, rid),
+ "admin_tenant_name": relation_get("service_tenant_name", unit, rid) }
+ for rid in relation_ids("identity-admin")
+ for unit, hostname in
+ ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
+ if hostname ]
+ return ctxs[0] if ctxs else {}
+
+def is_container():
+ """Return boolean determining if inside container"""
+ try:
+ check_call(["running-in-container"])
+ return True
+ except CalledProcessError:
+ return False
+
+def kafka_ctx():
+ servers = [ gethostbyname(relation_get("private-address", unit, rid))
+ + ":" + port
+ for rid in relation_ids("kafka")
+ for unit, port in
+ ((unit, relation_get("port", unit, rid))
+ for unit in related_units(rid))
+ if port ] \
+ if config.get("kafka-ready") else []
+ return { "kafka_servers": servers }
+
+def kafka_units():
+ """Return a list of kafka units"""
+ return [ unit for rid in relation_ids("kafka")
+ for unit in related_units(rid)
+ if relation_get("port", unit, rid) ]
+
+def provision_analytics():
+ hostname = gethostname()
+ ip = gethostbyname(unit_get("private-address"))
+ api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
+ port)
+ for rid in relation_ids("contrail-api")
+ for unit, port in
+ ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
+ if port ][0]
+ user, password, tenant = [ (relation_get("service_username", unit, rid),
+ relation_get("service_password", unit, rid),
+ relation_get("service_tenant_name", unit, rid))
+ for rid in relation_ids("identity-admin")
+ for unit in related_units(rid)
+ if relation_get("service_hostname", unit, rid) ][0]
+ log("Provisioning analytics {}".format(ip))
+ check_call(["contrail-provision-analytics",
+ "--host_name", hostname,
+ "--host_ip", ip,
+ "--api_server_ip", api_ip,
+ "--api_server_port", str(api_port),
+ "--oper", "add",
+ "--admin_user", user,
+ "--admin_password", password,
+ "--admin_tenant_name", tenant])
+
+def units(relation):
+ """Return a list of units for the specified relation"""
+ return [ unit for rid in relation_ids(relation)
+ for unit in related_units(rid) ]
+
def unprovision_analytics():
    """Deregister this analytics node from the Contrail API server.

    Runs from departed/broken hooks, so for the relation currently going
    away the data is read from the in-flight relation context (implicit
    relation_get); for the other relation a normal scan over relation ids
    is used.
    """
    if not remote_unit():
        # broken hook with no remote unit in context -- nothing to clean up
        return
    hostname = gethostname()
    ip = gethostbyname(unit_get("private-address"))
    relation = relation_type()
    api_ip = None
    api_port = None
    if relation == "contrail-api":
        # the departing relation IS the api relation; read it directly
        api_ip = gethostbyname(relation_get("private-address"))
        api_port = relation_get("port")
    else:
        # NOTE(review): assumes at least one contrail-api unit still
        # related -- [0] raises IndexError otherwise; confirm callers
        # guarantee this.
        api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
                              relation_get("port", unit, rid))
                             for rid in relation_ids("contrail-api")
                             for unit in related_units(rid) ][0]
    user = None
    password = None
    tenant = None
    if relation == "identity-admin":
        # departing relation is keystone admin; read credentials directly
        user = relation_get("service_username")
        password = relation_get("service_password")
        tenant = relation_get("service_tenant_name")
    else:
        user, password, tenant = [ (relation_get("service_username", unit, rid),
                                    relation_get("service_password", unit, rid),
                                    relation_get("service_tenant_name", unit, rid))
                                   for rid in relation_ids("identity-admin")
                                   for unit in related_units(rid) ][0]
    log("Unprovisioning analytics {}".format(ip))
    # "--oper del" removes this node's analytics registration from the
    # Contrail API server
    check_call(["contrail-provision-analytics",
                "--host_name", hostname,
                "--host_ip", ip,
                "--api_server_ip", api_ip,
                "--api_server_port", str(api_port),
                "--oper", "del",
                "--admin_user", user,
                "--admin_password", password,
                "--admin_tenant_name", tenant])
+
+def write_alarm_config():
+ ctx = {}
+ ctx.update(contrail_ctx())
+ ctx.update(kafka_ctx())
+ ctx.update(zookeeper_ctx())
+ ctx.update(discovery_ctx())
+ render("contrail-alarm-gen.conf",
+ "/etc/contrail/contrail-alarm-gen.conf", ctx)
+
+def write_analytics_api_config():
+ ctx = {}
+ ctx.update(contrail_ctx())
+ ctx.update(cassandra_ctx())
+ ctx.update(discovery_ctx())
+ ctx.update(contrail_api_ctx())
+ render("contrail-analytics-api.conf",
+ "/etc/contrail/contrail-analytics-api.conf", ctx)
+
+def write_collector_config():
+ ctx = {}
+ ctx.update(contrail_ctx())
+ ctx.update(cassandra_ctx())
+ ctx.update(kafka_ctx())
+ ctx.update(discovery_ctx())
+ if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
+ ctx["zookeeper"] = True
+ ctx.update(zookeeper_ctx())
+ render("contrail-collector.conf",
+ "/etc/contrail/contrail-collector.conf", ctx)
+
+def write_nodemgr_config():
+ ctx = discovery_ctx()
+ render("contrail-analytics-nodemgr.conf",
+ "/etc/contrail/contrail-analytics-nodemgr.conf", ctx)
+
+def write_query_engine_config():
+ ctx = {}
+ ctx.update(cassandra_ctx())
+ ctx.update(discovery_ctx())
+ render("contrail-query-engine.conf",
+ "/etc/contrail/contrail-query-engine.conf", ctx)
+
def write_snmp_collector_config():
    """Render /etc/contrail/contrail-snmp-collector.conf.

    Merges the contrail-api, zookeeper, discovery and keystone-admin
    contexts.  The file is written root:contrail mode 0o440 because the
    identity context includes the keystone admin password.
    """
    ctx = {}
    ctx.update(contrail_api_ctx())
    ctx.update(zookeeper_ctx())
    ctx.update(discovery_ctx())
    ctx.update(identity_admin_ctx())
    # 0o440: same bits as the old py2-only 0440 literal, py3-compatible
    render("contrail-snmp-collector.conf",
           "/etc/contrail/contrail-snmp-collector.conf", ctx, "root",
           "contrail", 0o440)
+
+def write_topology_config():
+ ctx = {}
+ ctx.update(zookeeper_ctx())
+ ctx.update(discovery_ctx())
+ render("contrail-topology.conf",
+ "/etc/contrail/contrail-topology.conf", ctx)
+
+def write_vnc_api_config():
+ ctx = {}
+ ctx.update(contrail_api_ctx())
+ ctx.update(identity_admin_ctx())
+ render("vnc_api_lib.ini", "/etc/contrail/vnc_api_lib.ini", ctx)
+
+def write_keystone_auth_config():
+ ctx = {}
+ ctx.update(contrail_api_ctx())
+ ctx.update(identity_admin_ctx())
+ render("contrail-keystone-auth.conf",
+ "/etc/contrail/contrail-keystone-auth.conf", ctx)
+
+def zookeeper_ctx():
+ return { "zk_servers": [ gethostbyname(relation_get("private-address", unit, rid))
+ + ":" + port
+ for rid in relation_ids("zookeeper")
+ for unit, port in
+ ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
+ if port ] }
diff --git a/charms/trusty/contrail-analytics/hooks/http-services-relation-joined b/charms/trusty/contrail-analytics/hooks/http-services-relation-joined
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/http-services-relation-joined
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/install b/charms/trusty/contrail-analytics/hooks/install
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/install
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-broken b/charms/trusty/contrail-analytics/hooks/kafka-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/kafka-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-changed b/charms/trusty/contrail-analytics/hooks/kafka-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/kafka-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-departed b/charms/trusty/contrail-analytics/hooks/kafka-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/kafka-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/start b/charms/trusty/contrail-analytics/hooks/start
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/start
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/stop b/charms/trusty/contrail-analytics/hooks/stop
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/stop
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/upgrade-charm b/charms/trusty/contrail-analytics/hooks/upgrade-charm
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/upgrade-charm
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed
new file mode 120000
index 0000000..f633cfc
--- /dev/null
+++ b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed
@@ -0,0 +1 @@
+contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/icon.svg b/charms/trusty/contrail-analytics/icon.svg
new file mode 100644
index 0000000..6f77c1a
--- /dev/null
+++ b/charms/trusty/contrail-analytics/icon.svg
@@ -0,0 +1,309 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="96"
+ height="96"
+ id="svg6517"
+ version="1.1"
+ inkscape:version="0.91 r13725"
+ sodipodi:docname="icon.svg">
+ <defs
+ id="defs6519">
+ <linearGradient
+ id="Background">
+ <stop
+ id="stop4178"
+ offset="0"
+ style="stop-color:#b8b8b8;stop-opacity:1" />
+ <stop
+ id="stop4180"
+ offset="1"
+ style="stop-color:#c9c9c9;stop-opacity:1" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Inner Shadow"
+ id="filter1121">
+ <feFlood
+ flood-opacity="0.59999999999999998"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood1123" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="out"
+ result="composite1"
+ id="feComposite1125" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur1127" />
+ <feOffset
+ dx="0"
+ dy="2"
+ result="offset"
+ id="feOffset1129" />
+ <feComposite
+ in="offset"
+ in2="SourceGraphic"
+ operator="atop"
+ result="composite2"
+ id="feComposite1131" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Drop Shadow"
+ id="filter950">
+ <feFlood
+ flood-opacity="0.25"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood952" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="in"
+ result="composite1"
+ id="feComposite954" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur956" />
+ <feOffset
+ dx="0"
+ dy="1"
+ result="offset"
+ id="feOffset958" />
+ <feComposite
+ in="SourceGraphic"
+ in2="offset"
+ operator="over"
+ result="composite2"
+ id="feComposite960" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath873">
+ <g
+ transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
+ id="g875"
+ inkscape:label="Layer 1"
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
+ <path
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
+ d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter891"
+ inkscape:label="Badge Shadow">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.71999962"
+ id="feGaussianBlur893" />
+ </filter>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="4.0745362"
+ inkscape:cx="48.413329"
+ inkscape:cy="49.018169"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1920"
+ inkscape:window-height="1025"
+ inkscape:window-x="0"
+ inkscape:window-y="27"
+ inkscape:window-maximized="1"
+ showborder="true"
+ showguides="true"
+ inkscape:guide-bbox="true"
+ inkscape:showpageshadow="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid821" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="16,48"
+ id="guide823" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,80"
+ id="guide825" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="80,40"
+ id="guide827" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,16"
+ id="guide829" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata6522">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="BACKGROUND"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(268,-635.29076)"
+ style="display:inline">
+ <path
+ style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
+ d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
+ id="path6455"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="PLACE YOUR PICTOGRAM HERE"
+ style="display:inline">
+ <g
+ style="display:inline"
+ transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
+ id="g3732">
+ <path
+ style="fill:#a3cfe8"
+ d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
+ id="path3746"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#9a9a9c"
+ d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
+ id="path3744"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#50a1d2"
+ d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
+ id="path3742"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#258bc8"
+ d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
+ id="path3740"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#6c6d71"
+ d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
+ id="path3738"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#0076c2"
+ d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
+ id="path3736"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#0275bc"
+ d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
+ id="path3734"
+ inkscape:connector-curvature="0" />
+ </g>
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="BADGE"
+ style="display:none"
+ sodipodi:insensitive="true">
+ <g
+ style="display:inline"
+ transform="translate(-340.00001,-581)"
+ id="g4394"
+ clip-path="none">
+ <g
+ id="g855">
+ <g
+ inkscape:groupmode="maskhelper"
+ id="g870"
+ clip-path="url(#clipPath873)"
+ style="opacity:0.6;filter:url(#filter891)">
+ <path
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
+ d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path844"
+ style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ </g>
+ <g
+ id="g862">
+ <path
+ sodipodi:type="arc"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4398"
+ sodipodi:cx="252"
+ sodipodi:cy="552.36218"
+ sodipodi:rx="12"
+ sodipodi:ry="12"
+ d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
+ <path
+ transform="matrix(1.25,0,0,1.25,33,-100.45273)"
+ d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path4400"
+ style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ <path
+ sodipodi:type="star"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4459"
+ sodipodi:sides="5"
+ sodipodi:cx="666.19574"
+ sodipodi:cy="589.50385"
+ sodipodi:r1="7.2431178"
+ sodipodi:r2="4.3458705"
+ sodipodi:arg1="1.0471976"
+ sodipodi:arg2="1.6755161"
+ inkscape:flatsided="false"
+ inkscape:rounded="0.1"
+ inkscape:randomized="0"
+ d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
+ transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/charms/trusty/contrail-analytics/metadata.yaml b/charms/trusty/contrail-analytics/metadata.yaml
new file mode 100644
index 0000000..8c33069
--- /dev/null
+++ b/charms/trusty/contrail-analytics/metadata.yaml
@@ -0,0 +1,28 @@
+name: contrail-analytics
+summary: OpenContrail Analytics Node
+maintainer: Robert Ayres <robert.ayres@ubuntu.com>
+description: |
+ OpenContrail is a network virtualization solution that provides an overlay
+ virtual-network to virtual-machines, containers or network namespaces.
+ .
+ This charm provides the analytics node component.
+categories:
+ - openstack
+provides:
+ contrail-analytics-api:
+ interface: contrail-analytics-api
+ http-services:
+ interface: http
+requires:
+ cassandra:
+ interface: cassandra
+ contrail-api:
+ interface: contrail-api
+ contrail-discovery:
+ interface: contrail-discovery
+ identity-admin:
+ interface: keystone-admin
+ kafka:
+ interface: kafka
+ zookeeper:
+ interface: zookeeper
diff --git a/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf b/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf
new file mode 100644
index 0000000..034c7d9
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf
@@ -0,0 +1,22 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULTS]
+host_ip = {{ host_ip }}
+collectors = 127.0.0.1:8086
+http_server_port = 5995
+log_local = 1
+log_level = SYS_NOTICE
+log_file = /var/log/contrail/contrail-alarm-gen.log
+kafka_broker_list = {{ kafka_servers|join(" ") }}
+zk_list = {{ zk_servers|join(",") }}
+
+[DISCOVERY]
+disc_server_ip = {{ disc_server_ip }}
+disc_server_port = {{ disc_server_port }}
+
+[REDIS]
+redis_server_port = 6379
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf b/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf
new file mode 100644
index 0000000..3f72020
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf
@@ -0,0 +1,26 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULTS]
+host_ip = {{ host_ip }}
+collectors = 127.0.0.1:8086
+cassandra_server_list = {{ cassandra_servers|join(" ") }}
+http_server_port = 8090
+rest_api_port = 8081
+rest_api_ip = 0.0.0.0
+log_local = 1
+log_level = SYS_NOTICE
+log_file = /var/log/contrail/contrail-analytics-api.log
+api_server = {{ api_server }}:{{ api_port }}
+
+[DISCOVERY]
+disc_server_ip = {{ disc_server_ip }}
+disc_server_port = {{ disc_server_port }}
+
+[REDIS]
+server = 127.0.0.1
+redis_server_port = 6379
+redis_query_port = 6379
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf b/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf
new file mode 100644
index 0000000..5bf7c03
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf
@@ -0,0 +1,9 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DISCOVERY]
+server = {{ disc_server_ip }}
+port = {{ disc_server_port }}
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-collector.conf b/charms/trusty/contrail-analytics/templates/contrail-collector.conf
new file mode 100644
index 0000000..5b2eeda
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-collector.conf
@@ -0,0 +1,31 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULT]
+cassandra_server_list = {{ cassandra_servers|join(" ") }}
+{%- if zookeeper %}
+zookeeper_server_list = {{ zk_servers|join(",") }}
+{%- endif %}
+kafka_broker_list = {{ kafka_servers|join(" ") }}
+hostip = {{ host_ip }}
+http_server_port = 8089
+log_file = /var/log/contrail/contrail-collector.log
+log_level = SYS_NOTICE
+log_local = 1
+syslog_port = 514
+sflow_port = 6343
+
+[COLLECTOR]
+port = 8086
+server = 0.0.0.0
+
+[DISCOVERY]
+port = {{ disc_server_port }}
+server = {{ disc_server_ip }}
+
+[REDIS]
+port = 6379
+server = 127.0.0.1
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf b/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf
new file mode 100644
index 0000000..7e10256
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf
@@ -0,0 +1,11 @@
+[KEYSTONE]
+auth_url = http://{{ auth_host }}:{{ auth_port }}/v2.0
+auth_host = {{ auth_host }}
+auth_protocol = http
+auth_port = {{ auth_port }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+admin_tenant_name = {{ admin_tenant_name }}
+memcache_servers = 127.0.0.1:11211
+insecure = False
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf b/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf
new file mode 100644
index 0000000..6cba023
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf
@@ -0,0 +1,24 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULT]
+analytics_data_ttl = 48
+cassandra_server_list = {{ cassandra_servers|join(" ") }}
+collectors =
+http_server_port = 8091
+log_file = /var/log/contrail/contrail-query-engine.log
+log_level = SYS_NOTICE
+log_local = 1
+max_slice = 100
+max_tasks = 16
+
+[DISCOVERY]
+port = {{ disc_server_port }}
+server = {{ disc_server_ip }}
+
+[REDIS]
+port = 6379
+server = 127.0.0.1
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf b/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf
new file mode 100644
index 0000000..e455b9a
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf
@@ -0,0 +1,28 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULTS]
+api_server = {{ api_server ~ ":" ~ api_port if api_server and api_port }}
+collectors = 127.0.0.1:8086
+fast_scan_frequency = 60
+http_server_port = 5920
+log_local = 1
+log_level = SYS_NOTICE
+log_file = /var/log/contrail/contrail-snmp-collector.log
+scan_frequency = 600
+zookeeper = {{ zk_servers|join(",") }}
+
+[DISCOVERY]
+disc_server_ip = {{ disc_server_ip }}
+disc_server_port = {{ disc_server_port }}
+
+[KEYSTONE]
+auth_host = {{ auth_host }}
+auth_protocol = http
+auth_port = {{ auth_port }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+admin_tenant_name = {{ admin_tenant_name }}
+
diff --git a/charms/trusty/contrail-analytics/templates/contrail-topology.conf b/charms/trusty/contrail-analytics/templates/contrail-topology.conf
new file mode 100644
index 0000000..18177fa
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/contrail-topology.conf
@@ -0,0 +1,19 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[DEFAULTS]
+analytics_api = 127.0.0.1:8081
+collectors = 127.0.0.1:8086
+http_server_port = 5921
+log_local = 1
+log_level = SYS_NOTICE
+log_file = /var/log/contrail/contrail-topology.log
+scan_frequency = 60
+zookeeper = {{ zk_servers|join(",") }}
+
+[DISCOVERY]
+disc_server_ip = {{ disc_server_ip }}
+disc_server_port = {{ disc_server_port }}
+
diff --git a/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini b/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini
new file mode 100644
index 0000000..fd68487
--- /dev/null
+++ b/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini
@@ -0,0 +1,16 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
+[global]
+WEB_SERVER = {{ api_server }}
+WEB_PORT = {{ api_port }}
+
+[auth]
+AUTHN_TYPE = keystone
+AUTHN_PROTOCOL = http
+AUTHN_SERVER = {{ auth_host }}
+AUTHN_PORT = {{ auth_port }}
+AUTHN_URL = /v2.0/tokens
+